add MT model
Files changed:
- all_results.json (+10, -10)
- config.json (+2, -2)
- generated_predictions.txt (+0, -0)
- predict_results.json (+6, -6)
- pytorch_model.bin (+3, -0)
- tokenizer_config.json (+1, -1)
- train_results.json (+4, -4)
- trainer_state.json (+7, -7)
- training_args.bin (+2, -2)
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
   "epoch": 3.0,
-  "predict_bleu":
-  "predict_gen_len":
-  "predict_loss": 2.
-  "predict_runtime":
+  "predict_bleu": 7.0023,
+  "predict_gen_len": 42.6347,
+  "predict_loss": 2.5708374977111816,
+  "predict_runtime": 932.8364,
   "predict_samples": 1574,
-  "predict_samples_per_second": 1.
-  "predict_steps_per_second": 0.
-  "train_loss": 2.
-  "train_runtime":
+  "predict_samples_per_second": 1.687,
+  "predict_steps_per_second": 0.338,
+  "train_loss": 2.3068067170299993,
+  "train_runtime": 303.1653,
   "train_samples": 2287,
-  "train_samples_per_second":
-  "train_steps_per_second": 2.
+  "train_samples_per_second": 22.631,
+  "train_steps_per_second": 2.266
 }
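As a quick sanity check (not part of the commit), the throughput figures above follow directly from the sample and step counts divided by the runtimes; a minimal sketch in Python:

    # Illustrative check of the metrics in all_results.json.
    predict_samples, predict_runtime = 1574, 932.8364
    train_samples, num_epochs, train_runtime = 2287, 3.0, 303.1653
    max_steps = 687  # from trainer_state.json below

    print(round(predict_samples / predict_runtime, 3))           # 1.687  -> predict_samples_per_second
    print(round(train_samples * num_epochs / train_runtime, 3))  # 22.631 -> train_samples_per_second
    print(round(max_steps / train_runtime, 3))                   # 2.266  -> train_steps_per_second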
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "m2m100_mos_fr",
+  "_name_or_path": "mos/m2m100_mos_fr",
   "activation_dropout": 0.0,
   "activation_function": "relu",
   "architectures": [
@@ -32,7 +32,7 @@
   "pad_token_id": 1,
   "scale_embedding": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.12.0",
   "use_cache": true,
   "vocab_size": 128112
 }
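The fields above can be read back with Transformers' config API; a small sketch (the local path "m2m100_mos_fr" is an assumption about where this repo's files are checked out):

    from transformers import AutoConfig

    # Load the committed config and inspect fields shown in the diff above.
    config = AutoConfig.from_pretrained("m2m100_mos_fr")  # hypothetical local path
    print(config.vocab_size)            # 128112
    print(config.transformers_version)  # "4.12.0"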
generated_predictions.txt CHANGED
The diff for this file is too large to render; see the raw diff.
predict_results.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "predict_bleu":
-  "predict_gen_len":
-  "predict_loss": 2.
-  "predict_runtime":
+  "predict_bleu": 7.0023,
+  "predict_gen_len": 42.6347,
+  "predict_loss": 2.5708374977111816,
+  "predict_runtime": 932.8364,
   "predict_samples": 1574,
-  "predict_samples_per_second": 1.
-  "predict_steps_per_second": 0.
+  "predict_samples_per_second": 1.687,
+  "predict_steps_per_second": 0.338
 }
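The predict_bleu of 7.0023 is on the standard 0-100 corpus-BLEU scale, most likely the sacrebleu score the Transformers translation example scripts report. A hedged sketch of the same kind of scoring (neither the predictions nor the references are visible in this diff, so the data below is a placeholder):

    import sacrebleu

    # Placeholder data; in practice the hypotheses would come from
    # generated_predictions.txt (one translation per line) and the
    # references from the evaluation set.
    hypotheses = ["le chat dort sur le tapis"]
    references = [["le chat dort sur le tapis"]]  # one reference stream

    print(sacrebleu.corpus_bleu(hypotheses, references).score)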
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efe2ea90ce6978bd975a5685c6923e6249c01d1ebe9b1ccdbc279049af71282a
+size 1935804116
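A rough size check (an inference from the LFS pointer, not something the commit states): at float32, a checkpoint of this size corresponds to on the order of 480 million parameters:

    size_bytes = 1935804116      # from the LFS pointer above
    print(size_bytes / 4 / 1e6)  # ~484 (million float32 parameters,
                                 # ignoring serialization overhead)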
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"src_lang": null, "tgt_lang": null, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "sp_model_kwargs": {}, "special_tokens_map_file": "m2m_100_1.2B_v2/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "m2m100_mos_fr", "model_max_length": 1024, "additional_special_tokens": ["__af__", "__am__", "__ar__", "__ast__", "__az__", "__ba__", "__be__", "__bg__", "__bn__", "__br__", "__bs__", "__ca__", "__ceb__", "__cs__", "__cy__", "__da__", "__de__", "__el__", "__en__", "__es__", "__et__", "__fa__", "__ff__", "__fi__", "__fr__", "__fy__", "__ga__", "__gd__", "__gl__", "__gu__", "__ha__", "__he__", "__hi__", "__hr__", "__ht__", "__hu__", "__hy__", "__id__", "__ig__", "__ilo__", "__is__", "__it__", "__ja__", "__jv__", "__ka__", "__kk__", "__km__", "__kn__", "__ko__", "__lb__", "__lg__", "__ln__", "__lo__", "__lt__", "__lv__", "__mg__", "__mk__", "__ml__", "__mn__", "__mr__", "__ms__", "__my__", "__ne__", "__nl__", "__no__", "__ns__", "__oc__", "__or__", "__pa__", "__pl__", "__ps__", "__pt__", "__ro__", "__ru__", "__sd__", "__si__", "__sk__", "__sl__", "__so__", "__sq__", "__sr__", "__ss__", "__su__", "__sv__", "__sw__", "__ta__", "__th__", "__tl__", "__tn__", "__tr__", "__uk__", "__ur__", "__uz__", "__vi__", "__wo__", "__xh__", "__yi__", "__yo__", "__zh__", "__zu__"], "tokenizer_class": "M2M100Tokenizer"}
+{"src_lang": null, "tgt_lang": null, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "sp_model_kwargs": {}, "special_tokens_map_file": "m2m_100_1.2B_v2/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "mos/m2m100_mos_fr", "model_max_length": 1024, "additional_special_tokens": ["__af__", "__am__", "__ar__", "__ast__", "__az__", "__ba__", "__be__", "__bg__", "__bn__", "__br__", "__bs__", "__ca__", "__ceb__", "__cs__", "__cy__", "__da__", "__de__", "__el__", "__en__", "__es__", "__et__", "__fa__", "__ff__", "__fi__", "__fr__", "__fy__", "__ga__", "__gd__", "__gl__", "__gu__", "__ha__", "__he__", "__hi__", "__hr__", "__ht__", "__hu__", "__hy__", "__id__", "__ig__", "__ilo__", "__is__", "__it__", "__ja__", "__jv__", "__ka__", "__kk__", "__km__", "__kn__", "__ko__", "__lb__", "__lg__", "__ln__", "__lo__", "__lt__", "__lv__", "__mg__", "__mk__", "__ml__", "__mn__", "__mr__", "__ms__", "__my__", "__ne__", "__nl__", "__no__", "__ns__", "__oc__", "__or__", "__pa__", "__pl__", "__ps__", "__pt__", "__ro__", "__ru__", "__sd__", "__si__", "__sk__", "__sl__", "__so__", "__sq__", "__sr__", "__ss__", "__su__", "__sv__", "__sw__", "__ta__", "__th__", "__tl__", "__tn__", "__tr__", "__uk__", "__ur__", "__uz__", "__vi__", "__wo__", "__xh__", "__yi__", "__yo__", "__zh__", "__zu__"], "tokenizer_class": "M2M100Tokenizer"}
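With these files in place, the checkpoint can be used like any other M2M100 model. An illustrative sketch, not taken from the commit ("mos/m2m100_mos_fr" is the _name_or_path recorded in config.json; how the Mossi source side is tagged is not visible here, since src_lang is null and the special-token list carries no Mossi code, so no source tag is set below):

    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    path = "mos/m2m100_mos_fr"  # assumption: repo id or local checkout
    tokenizer = M2M100Tokenizer.from_pretrained(path)
    model = M2M100ForConditionalGeneration.from_pretrained(path)

    encoded = tokenizer("...", return_tensors="pt")  # a Mossi sentence
    generated = model.generate(
        **encoded,
        # "__fr__" is among the additional_special_tokens listed above.
        forced_bos_token_id=tokenizer.get_lang_id("fr"),
    )
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))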
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 3.0,
-  "train_loss": 2.
-  "train_runtime":
+  "train_loss": 2.3068067170299993,
+  "train_runtime": 303.1653,
   "train_samples": 2287,
-  "train_samples_per_second":
-  "train_steps_per_second": 2.
+  "train_samples_per_second": 22.631,
+  "train_steps_per_second": 2.266
 }
trainer_state.json CHANGED
@@ -10,22 +10,22 @@
     {
       "epoch": 2.18,
       "learning_rate": 1.3609898107714703e-05,
-      "loss": 2.
+      "loss": 2.5059,
       "step": 500
     },
     {
       "epoch": 3.0,
       "step": 687,
-      "total_flos":
-      "train_loss": 2.
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 2.
+      "total_flos": 1622235349991424.0,
+      "train_loss": 2.3068067170299993,
+      "train_runtime": 303.1653,
+      "train_samples_per_second": 22.631,
+      "train_steps_per_second": 2.266
     }
   ],
   "max_steps": 687,
   "num_train_epochs": 3,
-  "total_flos":
+  "total_flos": 1622235349991424.0,
   "trial_name": null,
   "trial_params": null
 }
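A small aside derived from these numbers (not stated in the file): the logged learning rate fits the Trainer's default linear decay from 5e-5 with no warmup, since at step 500 of 687 the remaining fraction of the schedule is (687 - 500) / 687:

    # lr(step) = lr0 * (max_steps - step) / max_steps  (linear decay, no warmup)
    lr0, step, max_steps = 5e-5, 500, 687
    print(lr0 * (max_steps - step) / max_steps)  # ~1.3609898e-05, the logged value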
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8eb0b478f98e8d9b04eeff2dc65b1652523554c0545d9d09a6377c021b9d29f1
+size 2927
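As with any Trainer output, training_args.bin is a small pickled TrainingArguments object rather than model weights (hence the 2927-byte size); a short inspection sketch, an assumption about usage rather than part of the commit:

    import torch

    # Unpickling needs transformers installed so the TrainingArguments class
    # resolves; on PyTorch >= 2.6 pass weights_only=False explicitly.
    args = torch.load("training_args.bin")
    print(args.num_train_epochs)  # 3.0, matching trainer_state.json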