luanafelbarros committed
Commit • 71d21a9 • 1 Parent(s): 5b5233c
Add new SentenceTransformer model
Browse files
- 1_Pooling/config.json +10 -0
- README.md +692 -0
- config.json +31 -0
- config_sentence_transformers.json +10 -0
- model.safetensors +3 -0
- modules.json +14 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +55 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
    "word_embedding_dimension": 768,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false,
    "pooling_mode_weightedmean_tokens": false,
    "pooling_mode_lasttoken": false,
    "include_prompt": true
}
README.md
ADDED
@@ -0,0 +1,692 @@
---
language:
- en
- multilingual
- ar
- bg
- ca
- cs
- da
- de
- el
- es
- et
- fa
- fi
- fr
- gl
- gu
- he
- hi
- hr
- hu
- hy
- id
- it
- ja
- ka
- ko
- ku
- lt
- lv
- mk
- mn
- mr
- ms
- my
- nb
- nl
- pl
- pt
- ro
- ru
- sk
- sl
- sq
- sr
- sv
- th
- tr
- uk
- ur
- vi
- zh
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:3560698
- loss:ModifiedMatryoshkaLoss
- loss:MSELoss
base_model: google-bert/bert-base-multilingual-cased
widget:
- source_sentence: We cope with this pressure by having brains, and within our brains,
    decision-making centers that I've called here the "Actor."
  sentences:
  - Nós lidamos com esta pressão porque temos cérebro, e dentro do nosso cérebro,
    centros de tomada de decisão a que eu chamei aqui o "Ator".
  - Isto significa que o Crítico deve ter falado naquele animal, e que o Crítico deve
    estar contido entre os neurónios produtores de dopamina na esquerda, mas não nos
    neurónios produtores de dopamina na direita.
  - Na ressonância magnética e na espetroscopia de MR — a atividade do tumor está
    a vermelho —
- source_sentence: Once it's a closed system, you will have legal liability if you
    do not urge your CEO to get the maximum income from reducing and trading the carbon
    emissions that can be avoided.
  sentences:
  - (Risas) Espero que las conversaciones aquí en TED me ayuden a terminarla.
  - Una vez que es un sistema cerrado, tendrán responsabilidad legal si no exhortan
    a su ejecutivo en jefe a obtener el máximo ingreso de la reducción y comercialización
    de emisiones de carbono que pueden ser evitadas.
  - Pero también son muy efectivas en desviar nuestro camino.
- source_sentence: Whenever it comes up to the midpoint, it pauses, it carefully scans
    the odor interface as if it was sniffing out its environment, and then it turns
    around.
  sentences:
  - Tiene que decidir si dar la vuelta y quedarse en el mismo olor, o si cruzar la
    línea del medio y probar algo nuevo.
  - Ésta es una oportunidad.
  - Cada vez que llega al medio, se detiene analiza con cuidado la interfaz de olor,
    como si estuviera olfateando su entorno, y luego da la vuelta.
- source_sentence: You've seen the documentaries of sweatshops making garments all
    over the world, even in developed countries.
  sentences:
  - No llegaron muy lejos, obviamente.
  - Uds ya han visto documentales de los talleres de confección de prendas en todo
    el mundo, incluso en los países desarrollados.
  - Y los maestros también están frustrados.
- source_sentence: It's hands-on, it's in-your-face, it requires an active engagement,
    and it allows kids to apply all the core subject learning in real ways.
  sentences:
  - É prático, é presencial, isso requer uma participação ativa, e permite que as
    crianças apliquem todos os tópicos importantes de aprendizagem de forma real.
  - E no mundo do áudio que é quando o microfone fica muito perto da origem do som,
    e então ele entra nessa repetição auto-destrutiva que cria um som muito desagradável.
  - Vamos encarar a realidade, o contrato de uma grande marca multinacional para um
    fornecedor na Índia ou China tem um poder persuasivo muito maior do que as leis
    locais de trabalho, do que as regras ambientais locais, do que os padrões locais
    de Direitos Humanos.
datasets:
- sentence-transformers/parallel-sentences-talks
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- negative_mse
model-index:
- name: SentenceTransformer based on google-bert/bert-base-multilingual-cased
  results:
  - task:
      type: knowledge-distillation
      name: Knowledge Distillation
    dataset:
      name: MSE val en es
      type: MSE-val-en-es
    metrics:
    - type: negative_mse
      value: -31.554964184761047
      name: Negative Mse
  - task:
      type: knowledge-distillation
      name: Knowledge Distillation
    dataset:
      name: MSE val en pt
      type: MSE-val-en-pt
    metrics:
    - type: negative_mse
      value: -31.72471523284912
      name: Negative Mse
  - task:
      type: knowledge-distillation
      name: Knowledge Distillation
    dataset:
      name: MSE val en pt br
      type: MSE-val-en-pt-br
    metrics:
    - type: negative_mse
      value: -30.244168639183044
      name: Negative Mse
---

# SentenceTransformer based on google-bert/bert-base-multilingual-cased

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [google-bert/bert-base-multilingual-cased](https://huggingface.co/google-bert/bert-base-multilingual-cased) on the en-es, en-pt and [en-pt-br](https://huggingface.co/datasets/sentence-transformers/parallel-sentences-talks) datasets. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [google-bert/bert-base-multilingual-cased](https://huggingface.co/google-bert/bert-base-multilingual-cased) <!-- at revision 3f076fdb1ab68d5b2880cb87a0886f315b8146f8 -->
- **Maximum Sequence Length:** 128 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
- **Training Datasets:**
    - en-es
    - en-pt
    - [en-pt-br](https://huggingface.co/datasets/sentence-transformers/parallel-sentences-talks)
- **Languages:** en, multilingual, ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, gl, gu, he, hi, hr, hu, hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh
<!-- - **License:** Unknown -->
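
As a quick check, the sequence length and embedding dimensionality listed above can be read directly off the loaded model; a minimal sketch:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("luanafelbarros/bert-es-pt-cased-matryoshka")
print(model.max_seq_length)                      # 128
print(model.get_sentence_embedding_dimension())  # 768
```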

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```
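
With `pooling_mode_mean_tokens` enabled, the Pooling module averages BERT's token embeddings into a single sentence vector. A minimal sketch of standard mean pooling, assuming a 0/1 padding attention mask:

```python
import torch

def mean_pooling(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 768), output of the Transformer module
    # attention_mask:   (batch, seq_len), 1 for real tokens, 0 for padding
    mask = attention_mask.unsqueeze(-1).to(token_embeddings.dtype)
    summed = (token_embeddings * mask).sum(dim=1)  # sum over non-padding tokens
    counts = mask.sum(dim=1).clamp(min=1e-9)       # number of real tokens
    return summed / counts                         # (batch, 768)
```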

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("luanafelbarros/bert-es-pt-cased-matryoshka")
# Run inference
sentences = [
    "It's hands-on, it's in-your-face, it requires an active engagement, and it allows kids to apply all the core subject learning in real ways.",
    'É prático, é presencial, isso requer uma participação ativa, e permite que as crianças apliquem todos os tópicos importantes de aprendizagem de forma real.',
    'Vamos encarar a realidade, o contrato de uma grande marca multinacional para um fornecedor na Índia ou China tem um poder persuasivo muito maior do que as leis locais de trabalho, do que as regras ambientais locais, do que os padrões locais de Direitos Humanos.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
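
Because the model was trained with a Matryoshka objective over dimensions 768/512/256/128/64 (see Training Details below), embeddings can plausibly be truncated to one of those prefixes and re-normalized for cheaper storage and search. A sketch, where the 256 cutoff is an arbitrary choice among the trained sizes:

```python
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("luanafelbarros/bert-es-pt-cased-matryoshka")
embeddings = model.encode(["Thank you so much, Chris.", "Muito obrigado, Chris."])

# Keep the first 256 dimensions and re-normalize so that dot products
# are cosine similarities again.
truncated = embeddings[:, :256]
truncated = truncated / np.linalg.norm(truncated, axis=1, keepdims=True)
print(truncated @ truncated.T)
```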

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

## Evaluation

### Metrics

#### Knowledge Distillation

* Datasets: `MSE-val-en-es`, `MSE-val-en-pt` and `MSE-val-en-pt-br`
* Evaluated with [<code>MSEEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.MSEEvaluator)

| Metric           | MSE-val-en-es | MSE-val-en-pt | MSE-val-en-pt-br |
|:-----------------|:--------------|:--------------|:-----------------|
| **negative_mse** | **-31.555**   | **-31.7247**  | **-30.2442**     |
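
Here `negative_mse` is the mean squared error between the teacher's embeddings and this model's embeddings, negated so that higher is better. A sketch of the computation, assuming the usual MSEEvaluator convention of scaling the error by 100:

```python
import numpy as np

def negative_mse(teacher: np.ndarray, student: np.ndarray) -> float:
    # Mean squared error over all embedding components, scaled by 100
    # and negated so that larger (less negative) values are better.
    return -100.0 * float(np.mean((teacher - student) ** 2))
```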

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Datasets

#### en-es

* Dataset: en-es
* Size: 1,612,538 training samples
* Columns: <code>english</code>, <code>non_english</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | english | non_english | label |
  |:--------|:--------|:------------|:------|
  | type    | string  | string      | list  |
  | details | <ul><li>min: 4 tokens</li><li>mean: 25.46 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 26.67 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>size: 768 elements</li></ul> |
* Samples:
  | english | non_english | label |
  |:--------|:------------|:------|
  | <code>And then there are certain conceptual things that can also benefit from hand calculating, but I think they're relatively small in number.</code> | <code>Y luego hay ciertas aspectos conceptuales que pueden beneficiarse del cálculo a mano pero creo que son relativamente pocos.</code> | <code>[-0.015244179405272007, 0.04601434990763664, -0.052873335778713226, 0.03535117208957672, -0.039562877267599106, ...]</code> |
  | <code>One thing I often ask about is ancient Greek and how this relates.</code> | <code>Algo que pregunto a menudo es sobre el griego antiguo y cómo se relaciona.</code> | <code>[0.0012022971641272306, -0.009590390138328075, -0.032977133989334106, 0.017047710716724396, -0.0028919472824782133, ...]</code> |
  | <code>See, the thing we're doing right now is we're forcing people to learn mathematics.</code> | <code>Vean, lo que estamos haciendo ahora es forzar a la gente a aprender matemáticas.</code> | <code>[-0.019420800730586052, 0.10435999929904938, 0.009455346502363682, -0.02814250998198986, -0.017036104574799538, ...]</code> |
* Loss: <code>__main__.ModifiedMatryoshkaLoss</code> with these parameters:
  ```json
  {
      "loss": "MSELoss",
      "matryoshka_dims": [768, 512, 256, 128, 64],
      "matryoshka_weights": [1, 1, 1, 1, 1],
      "n_dims_per_step": -1
  }
  ```
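
`ModifiedMatryoshkaLoss` is defined in the training script (`__main__`) rather than in the library, so only its parameters are recorded here. A minimal sketch of what an MSE distillation loss with these settings would compute, assuming it follows the standard MatryoshkaLoss pattern of one weighted term per truncation dimension:

```python
import torch
import torch.nn.functional as F

def matryoshka_mse(student: torch.Tensor, teacher: torch.Tensor,
                   dims=(768, 512, 256, 128, 64),
                   weights=(1, 1, 1, 1, 1)) -> torch.Tensor:
    # One MSE term per Matryoshka dimensionality, each comparing the first
    # `d` components of the student and teacher embeddings.
    loss = student.new_zeros(())
    for d, w in zip(dims, weights):
        loss = loss + w * F.mse_loss(student[:, :d], teacher[:, :d])
    return loss
```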

#### en-pt

* Dataset: en-pt
* Size: 1,542,353 training samples
* Columns: <code>english</code>, <code>non_english</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | english | non_english | label |
  |:--------|:--------|:------------|:------|
  | type    | string  | string      | list  |
  | details | <ul><li>min: 5 tokens</li><li>mean: 24.95 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 27.08 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>size: 768 elements</li></ul> |
* Samples:
  | english | non_english | label |
  |:--------|:------------|:------|
  | <code>And the country that does this first will, in my view, leapfrog others in achieving a new economy even, an improved economy, an improved outlook.</code> | <code>E o país que fizer isto primeiro vai, na minha opinião, ultrapassar outros em alcançar uma nova economia até uma economia melhorada, uma visão melhorada.</code> | <code>[-0.016568265855312347, 0.10754051059484482, -0.025950804352760315, -0.045048732310533524, 0.01812679134309292, ...]</code> |
  | <code>In fact, I even talk about us moving from what we often call now the "knowledge economy" to what we might call a "computational knowledge economy," where high-level math is integral to what everyone does in the way that knowledge currently is.</code> | <code>De facto, eu até falo de mudarmos do que chamamos hoje a economia do conhecimento para o que poderemos chamar a economia do conhecimento computacional, onde a matemática de alto nível está integrada no que toda a gente faz da forma que o conhecimento actualmente está.</code> | <code>[-0.014394757337868214, 0.11997982114553452, -0.041491635143756866, -0.024539340287446976, 0.01425645500421524, ...]</code> |
  | <code>We can engage so many more students with this, and they can have a better time doing it.</code> | <code>Podemos cativar tantos mais estudantes com isto, e eles podem divertir-se mais a fazê-lo.</code> | <code>[-0.034232210367918015, 0.04277702793478966, -0.05683526396751404, -0.006559622474014759, -0.00639274762943387, ...]</code> |
* Loss: <code>__main__.ModifiedMatryoshkaLoss</code> with these parameters:
  ```json
  {
      "loss": "MSELoss",
      "matryoshka_dims": [768, 512, 256, 128, 64],
      "matryoshka_weights": [1, 1, 1, 1, 1],
      "n_dims_per_step": -1
  }
  ```

#### en-pt-br

* Dataset: [en-pt-br](https://huggingface.co/datasets/sentence-transformers/parallel-sentences-talks) at [0c70bc6](https://huggingface.co/datasets/sentence-transformers/parallel-sentences-talks/tree/0c70bc6714efb1df12f8a16b9056e4653563d128)
* Size: 405,807 training samples
* Columns: <code>english</code>, <code>non_english</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | english | non_english | label |
  |:--------|:--------|:------------|:------|
  | type    | string  | string      | list  |
  | details | <ul><li>min: 4 tokens</li><li>mean: 25.39 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 27.52 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>size: 768 elements</li></ul> |
* Samples:
  | english | non_english | label |
  |:--------|:------------|:------|
  | <code>And then there are certain conceptual things that can also benefit from hand calculating, but I think they're relatively small in number.</code> | <code>E também existem alguns aspectos conceituais que também podem se beneficiar do cálculo manual, mas eu acho que eles são relativamente poucos.</code> | <code>[-0.015244179405272007, 0.04601434990763664, -0.052873335778713226, 0.03535117208957672, -0.039562877267599106, ...]</code> |
  | <code>One thing I often ask about is ancient Greek and how this relates.</code> | <code>Uma coisa sobre a qual eu pergunto com frequencia é grego antigo e como ele se relaciona a isto.</code> | <code>[0.0012022971641272306, -0.009590390138328075, -0.032977133989334106, 0.017047710716724396, -0.0028919472824782133, ...]</code> |
  | <code>See, the thing we're doing right now is we're forcing people to learn mathematics.</code> | <code>Vejam, o que estamos fazendo agora, é que estamos forçando as pessoas a aprender matemática.</code> | <code>[-0.019420800730586052, 0.10435999929904938, 0.009455346502363682, -0.02814250998198986, -0.017036104574799538, ...]</code> |
* Loss: <code>__main__.ModifiedMatryoshkaLoss</code> with these parameters:
  ```json
  {
      "loss": "MSELoss",
      "matryoshka_dims": [768, 512, 256, 128, 64],
      "matryoshka_weights": [1, 1, 1, 1, 1],
      "n_dims_per_step": -1
  }
  ```

### Evaluation Datasets

#### en-es

* Dataset: en-es
* Size: 2,990 evaluation samples
* Columns: <code>english</code>, <code>non_english</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | english | non_english | label |
  |:--------|:--------|:------------|:------|
  | type    | string  | string      | list  |
  | details | <ul><li>min: 4 tokens</li><li>mean: 25.68 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 27.31 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>size: 768 elements</li></ul> |
* Samples:
  | english | non_english | label |
  |:--------|:------------|:------|
  | <code>Thank you so much, Chris.</code> | <code>Muchas gracias Chris.</code> | <code>[-0.061677999794483185, -0.04450423642992973, -0.0325058177113533, -0.06641444563865662, 0.003981702029705048, ...]</code> |
  | <code>And it's truly a great honor to have the opportunity to come to this stage twice; I'm extremely grateful.</code> | <code>Y es en verdad un gran honor tener la oportunidad de venir a este escenario por segunda vez. Estoy extremadamente agradecido.</code> | <code>[0.011398610658943653, -0.02500406838953495, -0.009884772822260857, 0.009336909279227257, 0.0030828709714114666, ...]</code> |
  | <code>I have been blown away by this conference, and I want to thank all of you for the many nice comments about what I had to say the other night.</code> | <code>He quedado conmovido por esta conferencia, y deseo agradecer a todos ustedes sus amables comentarios acerca de lo que tenía que decir la otra noche.</code> | <code>[-0.03842132166028023, 0.03635749593377113, -0.02491452544927597, -0.0032229204662144184, 0.0003549510147422552, ...]</code> |
* Loss: <code>__main__.ModifiedMatryoshkaLoss</code> with these parameters:
  ```json
  {
      "loss": "MSELoss",
      "matryoshka_dims": [768, 512, 256, 128, 64],
      "matryoshka_weights": [1, 1, 1, 1, 1],
      "n_dims_per_step": -1
  }
  ```

#### en-pt

* Dataset: en-pt
* Size: 2,992 evaluation samples
* Columns: <code>english</code>, <code>non_english</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | english | non_english | label |
  |:--------|:--------|:------------|:------|
  | type    | string  | string      | list  |
  | details | <ul><li>min: 4 tokens</li><li>mean: 25.05 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 27.58 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>size: 768 elements</li></ul> |
* Samples:
  | english | non_english | label |
  |:--------|:------------|:------|
  | <code>Thank you so much, Chris.</code> | <code>Muito obrigado, Chris.</code> | <code>[-0.06167794018983841, -0.04450422152876854, -0.032505810260772705, -0.06641443818807602, 0.0039817155338823795, ...]</code> |
  | <code>And it's truly a great honor to have the opportunity to come to this stage twice; I'm extremely grateful.</code> | <code>É realmente uma grande honra ter a oportunidade de pisar este palco pela segunda vez. Estou muito agradecido.</code> | <code>[0.011398610658943653, -0.02500406838953495, -0.009884772822260857, 0.009336909279227257, 0.0030828709714114666, ...]</code> |
  | <code>I have been blown away by this conference, and I want to thank all of you for the many nice comments about what I had to say the other night.</code> | <code>Fiquei muito impressionado com esta conferência e quero agradecer a todos os imensos comentários simpáticos sobre o que eu tinha a dizer naquela noite.</code> | <code>[-0.03842132166028023, 0.03635749593377113, -0.02491452544927597, -0.0032229204662144184, 0.0003549510147422552, ...]</code> |
* Loss: <code>__main__.ModifiedMatryoshkaLoss</code> with these parameters:
  ```json
  {
      "loss": "MSELoss",
      "matryoshka_dims": [768, 512, 256, 128, 64],
      "matryoshka_weights": [1, 1, 1, 1, 1],
      "n_dims_per_step": -1
  }
  ```

#### en-pt-br

* Dataset: [en-pt-br](https://huggingface.co/datasets/sentence-transformers/parallel-sentences-talks) at [0c70bc6](https://huggingface.co/datasets/sentence-transformers/parallel-sentences-talks/tree/0c70bc6714efb1df12f8a16b9056e4653563d128)
* Size: 992 evaluation samples
* Columns: <code>english</code>, <code>non_english</code>, and <code>label</code>
* Approximate statistics based on the first 992 samples:
  |         | english | non_english | label |
  |:--------|:--------|:------------|:------|
  | type    | string  | string      | list  |
  | details | <ul><li>min: 4 tokens</li><li>mean: 25.8 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 28.92 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>size: 768 elements</li></ul> |
* Samples:
  | english | non_english | label |
  |:--------|:------------|:------|
  | <code>Thank you so much, Chris.</code> | <code>Muito obrigado, Chris.</code> | <code>[-0.0616779662668705, -0.044504180550575256, -0.032505787909030914, -0.06641441583633423, 0.003981734160333872, ...]</code> |
  | <code>And it's truly a great honor to have the opportunity to come to this stage twice; I'm extremely grateful.</code> | <code>É realmente uma grande honra ter a oportunidade de estar neste palco pela segunda vez. Estou muito agradecido.</code> | <code>[0.011398598551750183, -0.02500401996076107, -0.009884790517389774, 0.009336900897324085, 0.003082842566072941, ...]</code> |
  | <code>I have been blown away by this conference, and I want to thank all of you for the many nice comments about what I had to say the other night.</code> | <code>Eu fui muito aplaudido por esta conferência e quero agradecer a todos pelos muitos comentários delicados sobre o que eu tinha a dizer naquela noite.</code> | <code>[-0.03842132166028023, 0.03635749593377113, -0.02491452544927597, -0.0032229204662144184, 0.0003549510147422552, ...]</code> |
* Loss: <code>__main__.ModifiedMatryoshkaLoss</code> with these parameters:
  ```json
  {
      "loss": "MSELoss",
      "matryoshka_dims": [768, 512, 256, 128, 64],
      "matryoshka_weights": [1, 1, 1, 1, 1],
      "n_dims_per_step": -1
  }
  ```

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 256
- `per_device_eval_batch_size`: 256
- `learning_rate`: 2e-05
- `num_train_epochs`: 1
- `warmup_ratio`: 0.1
- `fp16`: True
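
Expressed as sentence-transformers training arguments, the non-default settings above would look roughly like this (a sketch; `output_dir` is a placeholder):

```python
from sentence_transformers import SentenceTransformerTrainingArguments

args = SentenceTransformerTrainingArguments(
    output_dir="outputs",  # placeholder
    eval_strategy="steps",
    per_device_train_batch_size=256,
    per_device_eval_batch_size=256,
    learning_rate=2e-5,
    num_train_epochs=1,
    warmup_ratio=0.1,
    fp16=True,
)
```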

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 256
- `per_device_eval_batch_size`: 256
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 2e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 1
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: True
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs
| Epoch  | Step  | Training Loss | en-es loss | en-pt loss | en-pt-br loss | MSE-val-en-es_negative_mse | MSE-val-en-pt_negative_mse | MSE-val-en-pt-br_negative_mse |
|:------:|:-----:|:-------------:|:----------:|:----------:|:-------------:|:--------------------------:|:--------------------------:|:-----------------------------:|
| 0.0719 | 1000  | 0.028         | 0.0237     | 0.0237     | 0.0231        | -24.8296                   | -24.6706                   | -25.9588                      |
| 0.1438 | 2000  | 0.0227        | 0.0213     | 0.0215     | 0.0208        | -26.2546                   | -26.2964                   | -25.9444                      |
| 0.2157 | 3000  | 0.0213        | 0.0203     | 0.0205     | 0.0199        | -27.7589                   | -27.8414                   | -27.1460                      |
| 0.2876 | 4000  | 0.0206        | 0.0197     | 0.0199     | 0.0193        | -29.1241                   | -29.2139                   | -28.3021                      |
| 0.3595 | 5000  | 0.0201        | 0.0194     | 0.0195     | 0.0190        | -30.1292                   | -30.2692                   | -29.0747                      |
| 0.4313 | 6000  | 0.0198        | 0.0190     | 0.0192     | 0.0187        | -30.3807                   | -30.4967                   | -29.3404                      |
| 0.5032 | 7000  | 0.0195        | 0.0188     | 0.0190     | 0.0185        | -31.0799                   | -31.2305                   | -29.9549                      |
| 0.5751 | 8000  | 0.0193        | 0.0186     | 0.0188     | 0.0183        | -31.1297                   | -31.2883                   | -30.0050                      |
| 0.6470 | 9000  | 0.0192        | 0.0185     | 0.0186     | 0.0182        | -31.2788                   | -31.4498                   | -30.0589                      |
| 0.7189 | 10000 | 0.019         | 0.0184     | 0.0185     | 0.0181        | -31.3215                   | -31.4903                   | -30.0056                      |
| 0.7908 | 11000 | 0.019         | 0.0183     | 0.0184     | 0.0180        | -31.4416                   | -31.6329                   | -30.1343                      |
| 0.8627 | 12000 | 0.0189        | 0.0182     | 0.0184     | 0.0180        | -31.5266                   | -31.6991                   | -30.1956                      |
| 0.9346 | 13000 | 0.0188        | 0.0182     | 0.0183     | 0.0179        | -31.5550                   | -31.7247                   | -30.2442                      |

### Framework Versions
- Python: 3.10.12
- Sentence Transformers: 3.3.1
- Transformers: 4.46.3
- PyTorch: 2.5.1+cu121
- Accelerate: 1.1.1
- Datasets: 3.1.0
- Tokenizers: 0.20.3

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,31 @@
{
  "_name_or_path": "google-bert/bert-base-multilingual-cased",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "directionality": "bidi",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "pooler_fc_size": 768,
  "pooler_num_attention_heads": 12,
  "pooler_num_fc_layers": 3,
  "pooler_size_per_head": 128,
  "pooler_type": "first_token_transform",
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.46.3",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 119547
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "3.3.1",
    "transformers": "4.46.3",
    "pytorch": "2.5.1+cu121"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7200caf6dea186097f853f1fd68de8eac6e10ce78f94947828d7381e36033446
size 711436136
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 128,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,55 @@
{
  "added_tokens_decoder": {
    "0":   {"content": "[PAD]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "100": {"content": "[UNK]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "101": {"content": "[CLS]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "102": {"content": "[SEP]",  "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "103": {"content": "[MASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true}
  },
  "clean_up_tokenization_spaces": false,
  "cls_token": "[CLS]",
  "do_lower_case": false,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff