Davlan committed
Commit e039bca
1 Parent(s): 6b3bafc

Upload 14 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
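The added line routes tokenizer.json through Git LFS, since the full file (~16 MB, per the pointer later in this commit) is too large for regular Git objects. As a rough illustration of how these glob patterns select files, here is a sketch using Python's fnmatch as a stand-in for Git's attribute matcher; it only approximates Git's behavior for simple, top-level patterns like the ones in this hunk:

```python
from fnmatch import fnmatch

# Glob patterns tracked by LFS in this .gitattributes hunk.
lfs_patterns = ["*.zip", "*.zst", "*tfevents*", "tokenizer.json"]

def is_lfs_tracked(path: str) -> bool:
    """Approximate Git's attribute matching for simple, top-level patterns."""
    return any(fnmatch(path, pattern) for pattern in lfs_patterns)

print(is_lfs_tracked("tokenizer.json"))  # True  -> stored as an LFS pointer
print(is_lfs_tracked("config.json"))     # False -> stored as a normal blob
```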
README.md CHANGED
@@ -1,3 +1,61 @@
 ---
+language:
+- yo
+- yor
 license: apache-2.0
+base_model: google/mt5-small
+tags:
+- generated_from_trainer
+metrics:
+- bleu
+model-index:
+- name: mt5_yo_yor
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# mt5_yo_yor
+
+This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.7137
+- Bleu: 33.7206
+- Gen Len: 53.0782
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 5e-05
+- train_batch_size: 16
+- eval_batch_size: 16
+- seed: 42
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- num_epochs: 3.0
+
+### Training results
+
+
+
+### Framework versions
+
+- Transformers 4.31.0
+- Pytorch 2.0.1+cu117
+- Datasets 2.11.0
+- Tokenizers 0.13.3
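Since the card's usage sections are still stubs, a minimal inference sketch may help. The hub id `Davlan/mt5_yo_yor` is an assumption inferred from the committer and model name, and the input string is a placeholder; only the base checkpoint and the metrics above come from the card itself:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo_id = "Davlan/mt5_yo_yor"  # assumed hub id (committer + model name)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)

text = "Placeholder Yoruba input sentence."  # replace with a real input
inputs = tokenizer(text, return_tensors="pt")
output_ids = model.generate(**inputs, max_length=128)  # eval gen_len averaged ~53 tokens
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```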
all_results.json ADDED
@@ -0,0 +1,22 @@
+{
+  "epoch": 3.0,
+  "eval_bleu": 33.7206,
+  "eval_gen_len": 53.0782,
+  "eval_loss": 0.7137071490287781,
+  "eval_runtime": 205.7162,
+  "eval_samples": 1100,
+  "eval_samples_per_second": 5.347,
+  "eval_steps_per_second": 0.335,
+  "predict_bleu": 33.4199,
+  "predict_gen_len": 56.9864,
+  "predict_loss": 0.7419636249542236,
+  "predict_runtime": 216.3884,
+  "predict_samples": 1100,
+  "predict_samples_per_second": 5.083,
+  "predict_steps_per_second": 0.319,
+  "train_loss": 2.284148406300617,
+  "train_runtime": 1232.512,
+  "train_samples": 17900,
+  "train_samples_per_second": 43.57,
+  "train_steps_per_second": 2.724
+}
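The throughput fields are derivable from the sample counts and runtimes, which makes for a quick consistency check; the arithmetic below reproduces the reported values:

```python
import math

eval_samples, eval_runtime, batch_size = 1100, 205.7162, 16
train_samples, epochs, train_runtime = 17900, 3, 1232.512

print(round(eval_samples / eval_runtime, 3))  # 5.347 eval samples/s
print(round(math.ceil(eval_samples / batch_size) / eval_runtime, 3))  # 0.335 steps/s (69 batches of 16)
print(round(train_samples * epochs / train_runtime, 2))  # 43.57 train samples/s over 3 epochs
```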
config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "_name_or_path": "google/mt5-small",
+  "architectures": [
+    "MT5ForConditionalGeneration"
+  ],
+  "d_ff": 1024,
+  "d_kv": 64,
+  "d_model": 512,
+  "decoder_start_token_id": 0,
+  "dense_act_fn": "gelu_new",
+  "dropout_rate": 0.1,
+  "eos_token_id": 1,
+  "feed_forward_proj": "gated-gelu",
+  "initializer_factor": 1.0,
+  "is_encoder_decoder": true,
+  "is_gated_act": true,
+  "layer_norm_epsilon": 1e-06,
+  "model_type": "mt5",
+  "num_decoder_layers": 8,
+  "num_heads": 6,
+  "num_layers": 8,
+  "pad_token_id": 0,
+  "relative_attention_max_distance": 128,
+  "relative_attention_num_buckets": 32,
+  "tie_word_embeddings": false,
+  "tokenizer_class": "T5Tokenizer",
+  "torch_dtype": "float32",
+  "transformers_version": "4.31.0",
+  "use_cache": true,
+  "vocab_size": 250112
+}
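These are the stock mT5-small dimensions (8 encoder and 8 decoder layers, d_model 512, untied embeddings over a 250k-token vocabulary). A minimal sketch, assuming only the values in this config.json, to rebuild the architecture without downloading weights and inspect its size:

```python
from transformers import MT5Config, MT5ForConditionalGeneration

# Values copied from the config.json above (they match MT5Config's defaults).
config = MT5Config(
    d_model=512, d_kv=64, d_ff=1024,
    num_layers=8, num_decoder_layers=8, num_heads=6,
    vocab_size=250112, tie_word_embeddings=False,
    feed_forward_proj="gated-gelu",
)
model = MT5ForConditionalGeneration(config)  # randomly initialized, no download
print(f"{model.num_parameters():,} parameters")  # ~300M; the two 250k x 512 embeddings dominate
```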
eval_results.json ADDED
@@ -0,0 +1,10 @@
+{
+  "epoch": 3.0,
+  "eval_bleu": 33.7206,
+  "eval_gen_len": 53.0782,
+  "eval_loss": 0.7137071490287781,
+  "eval_runtime": 205.7162,
+  "eval_samples": 1100,
+  "eval_samples_per_second": 5.347,
+  "eval_steps_per_second": 0.335
+}
generated_predictions.txt ADDED
The diff for this file is too large to render. See raw diff
 
generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "decoder_start_token_id": 0,
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.31.0"
+}
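These fields become the defaults that `model.generate()` applies for this checkpoint; in the T5/mT5 SentencePiece vocabulary, id 0 is `<pad>` (also the decoder start token, T5-style) and id 1 is `</s>`. A small sketch constructing the same defaults in code:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig(
    decoder_start_token_id=0,  # T5-style decoding starts from <pad>
    eos_token_id=1,            # </s>
    pad_token_id=0,            # <pad>
)
print(gen_config)
```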
predict_results.json ADDED
@@ -0,0 +1,9 @@
+{
+  "predict_bleu": 33.4199,
+  "predict_gen_len": 56.9864,
+  "predict_loss": 0.7419636249542236,
+  "predict_runtime": 216.3884,
+  "predict_samples": 1100,
+  "predict_samples_per_second": 5.083,
+  "predict_steps_per_second": 0.319
+}
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
+}
spiece.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+size 4309802
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11d4f3fefdcb7b7e8532db3105feeda0899fbf096c91ea3b78261277ce1f5fe7
+size 16330467
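Both spiece.model and tokenizer.json are committed as Git LFS pointer files: three `key value` lines giving the spec version, the SHA-256 of the real blob, and its byte size; the blob itself is fetched from LFS storage at checkout. A minimal parsing sketch, fed the tokenizer.json pointer above:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its key/value lines."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:11d4f3fefdcb7b7e8532db3105feeda0899fbf096c91ea3b78261277ce1f5fe7\n"
    "size 16330467\n"
)
print(pointer["oid"], pointer["size"])  # the real tokenizer.json blob is ~16 MB
```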
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "additional_special_tokens": null,
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "extra_ids": 0,
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "T5Tokenizer",
+  "unk_token": "<unk>"
+}
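The odd-looking model_max_length is not a real limit: it is transformers' VERY_LARGE_INTEGER sentinel, int(1e30), used when a checkpoint declares no maximum input length; the trailing digits are float64 rounding. A one-line check:

```python
# 1e30 is a float64, so converting it to int exposes rounding in the low digits:
assert int(1e30) == 1000000000000000019884624838656
```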
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 3.0,
+  "train_loss": 2.284148406300617,
+  "train_runtime": 1232.512,
+  "train_samples": 17900,
+  "train_samples_per_second": 43.57,
+  "train_steps_per_second": 2.724
+}
trainer_state.json ADDED
@@ -0,0 +1,61 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "global_step": 3357,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.45,
+      "learning_rate": 4.2552874590408106e-05,
+      "loss": 6.2133,
+      "step": 500
+    },
+    {
+      "epoch": 0.89,
+      "learning_rate": 3.5105749180816204e-05,
+      "loss": 2.2643,
+      "step": 1000
+    },
+    {
+      "epoch": 1.34,
+      "learning_rate": 2.7658623771224308e-05,
+      "loss": 1.7468,
+      "step": 1500
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 2.021149836163241e-05,
+      "loss": 1.506,
+      "step": 2000
+    },
+    {
+      "epoch": 2.23,
+      "learning_rate": 1.2764372952040512e-05,
+      "loss": 1.3734,
+      "step": 2500
+    },
+    {
+      "epoch": 2.68,
+      "learning_rate": 5.317247542448615e-06,
+      "loss": 1.3065,
+      "step": 3000
+    },
+    {
+      "epoch": 3.0,
+      "step": 3357,
+      "total_flos": 6313176299397120.0,
+      "train_loss": 2.284148406300617,
+      "train_runtime": 1232.512,
+      "train_samples_per_second": 43.57,
+      "train_steps_per_second": 2.724
+    }
+  ],
+  "max_steps": 3357,
+  "num_train_epochs": 3,
+  "total_flos": 6313176299397120.0,
+  "trial_name": null,
+  "trial_params": null
+}
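The logged learning rates follow the card's linear scheduler with no warmup: lr(step) = 5e-05 * (max_steps - step) / max_steps, with max_steps = 3357. A quick reproduction of the log_history values:

```python
max_steps, base_lr = 3357, 5e-05

for step in (500, 1000, 1500, 2000, 2500, 3000):
    lr = base_lr * (max_steps - step) / max_steps
    print(step, lr)
# 500 -> ~4.2553e-05, 1000 -> ~3.5106e-05, ..., 3000 -> ~5.3172e-06,
# matching the learning_rate entries in log_history above.
```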
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbf078fef04359a2a1d739b685c4f9b35b84595f226ecababc1945cfc26f42c4
+size 4091