SeyedAli committed on
Commit b9411a1
1 Parent(s): 809a4d5

SeyedAli/Persian-Text-paraphraser-mT5-V1

README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ base_model: erfan226/persian-t5-paraphraser
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: output
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # output
+
+ This model is a fine-tuned version of [erfan226/persian-t5-paraphraser](https://huggingface.co/erfan226/persian-t5-paraphraser) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2334
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 5
+ - eval_batch_size: 5
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | No log        | 1.0   | 160  | 0.4026          |
+ | No log        | 2.0   | 320  | 0.2853          |
+ | No log        | 3.0   | 480  | 0.2543          |
+ | 1.5981        | 4.0   | 640  | 0.2466          |
+ | 1.5981        | 5.0   | 800  | 0.2408          |
+ | 1.5981        | 6.0   | 960  | 0.2367          |
+ | 0.3535        | 7.0   | 1120 | 0.2363          |
+ | 0.3535        | 8.0   | 1280 | 0.2344          |
+ | 0.3535        | 9.0   | 1440 | 0.2334          |
+ | 0.3151        | 10.0  | 1600 | 0.2334          |
+
+
+ ### Framework versions
+
+ - Transformers 4.33.1
+ - Pytorch 2.0.1+cu118
+ - Datasets 2.14.5
+ - Tokenizers 0.13.3
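The card above ships no usage snippet, so here is a minimal sketch (not part of the committed file) of loading this checkpoint with standard `transformers` APIs. The repo id is taken from the commit header; the sample sentence and generation settings are illustrative, and if the base model expects a task prefix on the input, the card does not document it.

```python
# Minimal sketch: load the fine-tuned paraphraser and generate one paraphrase.
from transformers import AutoTokenizer, T5ForConditionalGeneration

model_id = "SeyedAli/Persian-Text-paraphraser-mT5-V1"  # repo id from the commit header
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id)

text = "این یک جمله نمونه است."  # illustrative input: "This is a sample sentence."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
outputs = model.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```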
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "erfan226/persian-t5-paraphraser",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "classifier_dropout": 0.0,
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.33.1",
+   "use_cache": true,
+   "vocab_size": 32103
+ }
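The architecture fields above can be inspected programmatically rather than read off the raw JSON; a small sketch, assuming the same repo id as in the usage example:

```python
from transformers import T5Config

config = T5Config.from_pretrained("SeyedAli/Persian-Text-paraphraser-mT5-V1")
# Mirrors config.json above: a 12-layer encoder-decoder with 768-dim hidden
# states, 12 attention heads, and a gated-GELU feed-forward block.
print(config.num_layers, config.num_decoder_layers, config.num_heads, config.d_model)
```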
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.33.1"
+ }
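These defaults (decoder-start, eos, and pad token ids) are what `model.generate` picks up automatically; they can also be loaded on their own. A sketch under the same repo-id assumption:

```python
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("SeyedAli/Persian-Text-paraphraser-mT5-V1")
print(gen_config.decoder_start_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 0 1 0
```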
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af5f6864d274469852700a7d0c357e74d2ca90d7d340517ae64fcdb73ee86650
+ size 990255285
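The three lines above are a Git LFS pointer, not the weights themselves: the actual ~990 MB checkpoint lives in LFS storage and is resolved on download. A hedged sketch of fetching it directly with `huggingface_hub` (the filename matches this diff; the returned path depends on your local cache):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the real ~990 MB checkpoint.
path = hf_hub_download(
    repo_id="SeyedAli/Persian-Text-paraphraser-mT5-V1",
    filename="pytorch_model.bin",
)
print(path)
```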
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,119 @@
+ {
+   "add_special_tokens": true,
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "extra_ids": 100,
+   "max_length": 64,
+   "model_max_length": 100,
+   "pad_to_multiple_of": null,
+   "pad_token": "<pad>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "stride": 0,
+   "tokenizer_class": "T5Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<unk>"
+ }
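The config above declares a `T5Tokenizer` with 100 sentinel (`<extra_id_*>`) tokens and `model_max_length: 100`; a short sketch, assuming the same repo id, showing how these settings surface at load time:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("SeyedAli/Persian-Text-paraphraser-mT5-V1")
print(tokenizer.model_max_length)                       # 100, per tokenizer_config.json
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))  # id of the first T5 sentinel token
```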
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b33ba54a4c5de9dd00cd74bcb465f198a5d9b6bf6f842f5e7bc44544f26d67b5
+ size 4027