Initial commit
- .gitattributes +1 -0
- README.md +231 -0
- benchmark_results.txt +1 -0
- benchmark_translations.zip +0 -0
- config.json +41 -0
- generation_config.json +16 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- source.spm +3 -0
- special_tokens_map.json +1 -0
- target.spm +3 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.spm filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,231 @@
+---
+library_name: transformers
+language:
+- af
+- ang
+- bar
+- bi
+- bzj
+- da
+- de
+- djk
+- drt
+- en
+- enm
+- es
+- fo
+- fr
+- frr
+- fy
+- gos
+- got
+- gsw
+- hrx
+- hwc
+- icr
+- is
+- it
+- jam
+- kri
+- ksh
+- lb
+- li
+- nb
+- nds
+- nl
+- nn
+- no
+- non
+- pcm
+- pis
+- pt
+- rop
+- sco
+- srm
+- srn
+- stq
+- sv
+- swg
+- tcs
+- tpi
+- yi
+- zea
+
+tags:
+- translation
+- opus-mt-tc-bible
+
+license: apache-2.0
+model-index:
+- name: opus-mt-tc-bible-big-gem-fra_ita_por_spa
+  results:
+  - task:
+      name: Translation multi-multi
+      type: translation
+      args: multi-multi
+    dataset:
+      name: tatoeba-test-v2020-07-28-v2023-09-26
+      type: tatoeba_mt
+      args: multi-multi
+    metrics:
+    - name: BLEU
+      type: bleu
+      value: 43.1
+    - name: chr-F
+      type: chrf
+      value: 0.69633
+---
+# opus-mt-tc-bible-big-gem-fra_ita_por_spa
+
+## Table of Contents
+- [Model Details](#model-details)
+- [Uses](#uses)
+- [Risks, Limitations and Biases](#risks-limitations-and-biases)
+- [How to Get Started With the Model](#how-to-get-started-with-the-model)
+- [Training](#training)
+- [Evaluation](#evaluation)
+- [Citation Information](#citation-information)
+- [Acknowledgements](#acknowledgements)
+
+## Model Details
+
+Neural machine translation model for translating from Germanic languages (gem) to French, Italian, Portuguese and Spanish (fra+ita+por+spa).
+
+This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models were originally trained with [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++, and have been converted to PyTorch using Hugging Face's transformers library. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train).
+
+**Model Description:**
+- **Developed by:** Language Technology Research Group at the University of Helsinki
+- **Model Type:** Translation (transformer-big)
+- **Release**: 2024-08-17
+- **License:** Apache-2.0
+- **Language(s):**
+  - Source Language(s): afr ang bar bis bzj dan deu djk drt eng enm fao frr fry gos got gsw hrx hwc icr isl jam kri ksh lim ltz nds nld nno nob non nor pcm pis rop sco srm srn stq swe swg tcs tpi yid zea
+  - Target Language(s): fra ita por spa
+  - Valid Target Language Labels: >>fra<< >>ita<< >>por<< >>spa<< >>xxx<<
+- **Original Model**: [opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/gem-fra+ita+por+spa/opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.zip)
+- **Resources for more information:**
+  - [OPUS-MT dashboard](https://opus.nlpl.eu/dashboard/index.php?pkg=opusmt&test=all&scoreslang=all&chart=standard&model=Tatoeba-MT-models/gem-fra%2Bita%2Bpor%2Bspa/opusTCv20230926max50%2Bbt%2Bjhubc_transformer-big_2024-08-17)
+  - [OPUS-MT-train GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train)
+  - [More information about MarianNMT models in the transformers library](https://huggingface.co/docs/transformers/model_doc/marian)
+  - [Tatoeba Translation Challenge](https://github.com/Helsinki-NLP/Tatoeba-Challenge/)
+  - [HPLT bilingual data v1 (as part of the Tatoeba Translation Challenge dataset)](https://hplt-project.org/datasets/v1)
+  - [A massively parallel Bible corpus](https://aclanthology.org/L14-1215/)
+
+This is a multilingual translation model with multiple target languages. A sentence-initial language token is required in the form of `>>id<<` (id = valid target language ID), e.g. `>>fra<<`.
+
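Illustration (ours, not part of the original model card): a minimal sketch of prepending the required target-language label; the helper name `with_target` and the sample sentences are invented for the example.

```python
def with_target(lang: str, sentences: list[str]) -> list[str]:
    # Prepend the sentence-initial >>id<< label the model expects.
    return [f">>{lang}<< {s}" for s in sentences]

batch = with_target("fra", ["Good morning!", "See you tomorrow."])
# -> ['>>fra<< Good morning!', '>>fra<< See you tomorrow.']
```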
+## Uses
+
+This model can be used for translation and text-to-text generation.
+
+## Risks, Limitations and Biases
+
+**CONTENT WARNING: Readers should be aware that the model is trained on various public data sets that may contain content that is disturbing, offensive, and can propagate historical and current stereotypes.**
+
+Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)).
+
+## How to Get Started With the Model
+
+A short example code:
+
+```python
+from transformers import MarianMTModel, MarianTokenizer
+
+src_text = [
+    '>>por<< "Thanks a lot!" "My pleasure!"',
+    ">>por<< Don't look through the keyhole."
+]
+
+model_name = "pytorch-models/opus-mt-tc-bible-big-gem-fra_ita_por_spa"
+tokenizer = MarianTokenizer.from_pretrained(model_name)
+model = MarianMTModel.from_pretrained(model_name)
+translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
+
+for t in translated:
+    print(tokenizer.decode(t, skip_special_tokens=True))
+
+# expected output:
+# "Muito obrigado!" "Meu prazer!"
+# Não olhe pela fechadura.
+```
+
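As a side note (ours), the decoding loop above can equivalently be written as a single call:

```python
# batch_decode returns the same strings the loop prints, as a list.
print(tokenizer.batch_decode(translated, skip_special_tokens=True))
```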
+You can also use OPUS-MT models with the transformers pipelines, for example:
+
+```python
+from transformers import pipeline
+pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-bible-big-gem-fra_ita_por_spa")
+print(pipe('>>por<< "Thanks a lot!" "My pleasure!"'))
+
+# expected output: "Muito obrigado!" "Meu prazer!"
+```
+
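The pipeline also accepts a list of inputs, so one call can serve several target languages at once; a sketch (the sample sentences are ours):

```python
# Each input carries its own >>id<< label, so targets can be mixed in a batch.
outputs = pipe([">>spa<< How are you?", ">>ita<< How are you?"])
for out in outputs:
    print(out["translation_text"])
```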
+## Training
+
+- **Data**: opusTCv20230926max50+bt+jhubc ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge))
+- **Pre-processing**: SentencePiece (spm32k,spm32k); see the tokenization sketch after this list
+- **Model Type:** transformer-big
+- **Original MarianNMT Model**: [opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/gem-fra+ita+por+spa/opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.zip)
+- **Training Scripts**: [GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train)
+
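A minimal sketch (ours) of inspecting the spm32k preprocessing, assuming the `sentencepiece` package is installed and `source.spm` from this repository is available locally:

```python
import sentencepiece as spm

# Load the 32k source-side SentencePiece model shipped with this repo.
sp = spm.SentencePieceProcessor(model_file="source.spm")

# Show how a source sentence is split into subword pieces.
print(sp.encode(">>por<< Don't look through the keyhole.", out_type=str))
```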
+## Evaluation
+
+* [Model scores at the OPUS-MT dashboard](https://opus.nlpl.eu/dashboard/index.php?pkg=opusmt&test=all&scoreslang=all&chart=standard&model=Tatoeba-MT-models/gem-fra%2Bita%2Bpor%2Bspa/opusTCv20230926max50%2Bbt%2Bjhubc_transformer-big_2024-08-17)
+* test set translations: [opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/gem-fra+ita+por+spa/opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.test.txt)
+* test set scores: [opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/gem-fra+ita+por+spa/opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17.eval.txt)
+* benchmark results: [benchmark_results.txt](benchmark_results.txt)
+* benchmark output: [benchmark_translations.zip](benchmark_translations.zip)
+
+| langpair | testset | chr-F | BLEU | #sent | #words |
+|----------|---------|-------|------|-------|--------|
+| multi-multi | tatoeba-test-v2020-07-28-v2023-09-26 | 0.69633 | 43.1 | 10000 | 82876 |
+
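For reference (our sketch, not part of the card), scores of this kind can be computed with `sacrebleu`; `hypotheses` and `references` below are placeholders for the decoded test set and its reference translations:

```python
import sacrebleu

hypotheses = ["Não olhe pela fechadura."]    # placeholder: model outputs
references = [["Não olhe pela fechadura."]]  # placeholder: one reference stream

bleu = sacrebleu.corpus_bleu(hypotheses, references)
chrf = sacrebleu.corpus_chrf(hypotheses, references)
# sacrebleu reports chrF on a 0-100 scale; the table above uses 0-1.
print(f"BLEU = {bleu.score:.1f}, chr-F = {chrf.score / 100:.5f}")
```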
+## Citation Information
+
+* Publications: [Democratizing neural machine translation with OPUS-MT](https://doi.org/10.1007/s10579-023-09704-w) and [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please cite if you use this model.)
+
+```bibtex
+@article{tiedemann2023democratizing,
+  title={Democratizing neural machine translation with {OPUS-MT}},
+  author={Tiedemann, J{\"o}rg and Aulamo, Mikko and Bakshandaeva, Daria and Boggia, Michele and Gr{\"o}nroos, Stig-Arne and Nieminen, Tommi and Raganato, Alessandro and Scherrer, Yves and Vazquez, Raul and Virpioja, Sami},
+  journal={Language Resources and Evaluation},
+  number={58},
+  pages={713--755},
+  year={2023},
+  publisher={Springer Nature},
+  issn={1574-0218},
+  doi={10.1007/s10579-023-09704-w}
+}
+
+@inproceedings{tiedemann-thottingal-2020-opus,
+  title = "{OPUS}-{MT} {--} Building open translation services for the World",
+  author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh},
+  booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation",
+  month = nov,
+  year = "2020",
+  address = "Lisboa, Portugal",
+  publisher = "European Association for Machine Translation",
+  url = "https://aclanthology.org/2020.eamt-1.61",
+  pages = "479--480",
+}
+
+@inproceedings{tiedemann-2020-tatoeba,
+  title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}",
+  author = {Tiedemann, J{\"o}rg},
+  booktitle = "Proceedings of the Fifth Conference on Machine Translation",
+  month = nov,
+  year = "2020",
+  address = "Online",
+  publisher = "Association for Computational Linguistics",
+  url = "https://aclanthology.org/2020.wmt-1.139",
+  pages = "1174--1182",
+}
+```
+
+## Acknowledgements
+
+The work is supported by the [HPLT project](https://hplt-project.org/), funded by the European Union’s Horizon Europe research and innovation programme under grant agreement No 101070350. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland, and the [EuroHPC supercomputer LUMI](https://www.lumi-supercomputer.eu/).
+
+## Model conversion info
+
+* transformers version: 4.45.1
+* OPUS-MT git hash: 0882077
+* port time: Tue Oct 8 11:07:52 EEST 2024
+* port machine: LM0-400-22516.local
benchmark_results.txt
ADDED
@@ -0,0 +1 @@
+multi-multi	tatoeba-test-v2020-07-28-v2023-09-26	0.69633	43.1	10000	82876
benchmark_translations.zip
ADDED
File without changes
config.json
ADDED
@@ -0,0 +1,41 @@
+{
+  "_name_or_path": "pytorch-models/opus-mt-tc-bible-big-gem-fra_ita_por_spa",
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "MarianMTModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 4096,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 56812,
+  "decoder_vocab_size": 56813,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 4096,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 495,
+  "forced_eos_token_id": null,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_length": null,
+  "max_position_embeddings": 1024,
+  "model_type": "marian",
+  "normalize_embedding": false,
+  "num_beams": null,
+  "num_hidden_layers": 6,
+  "pad_token_id": 56812,
+  "scale_embedding": true,
+  "share_encoder_decoder_embeddings": true,
+  "static_position_embeddings": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.45.1",
+  "use_cache": true,
+  "vocab_size": 56813
+}
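Sanity check (ours, not part of the commit): the architecture fields above can be read back through `AutoConfig` rather than by parsing the file by hand:

```python
from transformers import AutoConfig

# Loads config.json from the hub ID used in the README's pipeline example.
cfg = AutoConfig.from_pretrained("Helsinki-NLP/opus-mt-tc-bible-big-gem-fra_ita_por_spa")
print(cfg.model_type, cfg.d_model, cfg.encoder_layers, cfg.vocab_size)
# marian 1024 6 56813
```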
generation_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "_from_model_config": true,
+  "bad_words_ids": [
+    [
+      56812
+    ]
+  ],
+  "bos_token_id": 0,
+  "decoder_start_token_id": 56812,
+  "eos_token_id": 495,
+  "forced_eos_token_id": 495,
+  "max_length": 512,
+  "num_beams": 4,
+  "pad_token_id": 56812,
+  "transformers_version": "4.45.1"
+}
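These defaults (beam size 4, maximum length 512, the pad token banned via `bad_words_ids`) apply to every `generate` call but can be overridden per call; a sketch (ours), assuming `model` and `tokenizer` are loaded as in the README example:

```python
# Override the shipped generation defaults for one call:
# wider beam search and a shorter length budget.
inputs = tokenizer([">>fra<< Good morning!"], return_tensors="pt", padding=True)
out = model.generate(**inputs, num_beams=8, max_length=128)
print(tokenizer.batch_decode(out, skip_special_tokens=True))
```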
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de207b5de62d074024092db914b4c0cc0dfb3de0343b8d72ee2789e639e177f2
+size 938392420
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbe519c1a7fc41c1d3f6a6b3e02056378ad578c6ad2ba6aea9f2bbbd54a6d489
+size 938443653
source.spm
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d1ab06ee1bca0f9c9fea6c5b4993c6ec1f61a7a9f455fbd3e5dbc0764c97d77
+size 795086
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
target.spm
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be868d4a83316a930351b231efa0745cbba426d8beacf20931b8bb75f4e0bb87
+size 817353
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"source_lang": "gem", "target_lang": "fra+ita+por+spa", "unk_token": "<unk>", "eos_token": "</s>", "pad_token": "<pad>", "model_max_length": 512, "sp_model_kwargs": {}, "separate_vocabs": false, "special_tokens_map_file": null, "name_or_path": "marian-models/opusTCv20230926max50+bt+jhubc_transformer-big_2024-08-17/gem-fra+ita+por+spa", "tokenizer_class": "MarianTokenizer"}
vocab.json
ADDED
The diff for this file is too large to render.