Michael-Vptn committed
Commit 2023b49
1 Parent(s): 3bc7055

Upload TFT5ForConditionalGeneration

Files changed (4):
  1. README.md +6 -6
  2. config.json +19 -10
  3. generation_config.json +1 -1
  4. tf_model.h5 +1 -1
README.md CHANGED
@@ -4,19 +4,19 @@ base_model: t5-base
 tags:
 - generated_from_keras_callback
 model-index:
-- name: demo
+- name: text-summarization-t5-base
   results: []
 ---
 
 <!-- This model card has been generated automatically according to the information Keras had access to. You should
 probably proofread and complete it, then remove this comment. -->
 
-# demo
+# text-summarization-t5-base
 
 This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Train Loss: 1.1621
-- Validation Loss: 0.8490
+- Train Loss: 1.2480
+- Validation Loss: 0.9039
 - Epoch: 0
 
 ## Model description
@@ -43,12 +43,12 @@ The following hyperparameters were used during training:
 
 | Train Loss | Validation Loss | Epoch |
 |:----------:|:---------------:|:-----:|
-| 1.1621 | 0.8490 | 0 |
+| 1.2480 | 0.9039 | 0 |
 
 
 ### Framework versions
 
-- Transformers 4.32.0
+- Transformers 4.32.1
 - TensorFlow 2.13.0
 - Datasets 2.14.4
 - Tokenizers 0.12.1
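
As context for the updated model card, here is a minimal usage sketch for the uploaded TF checkpoint. It is not part of the commit: the repo id is an assumption based on the new model-index name, and since this commit does not upload tokenizer files, the tokenizer is taken from the t5-base base model.

```python
# Minimal sketch (not part of the commit): loading the uploaded TF checkpoint for
# the summarization use case the card describes. The repo id below is an
# assumption; tokenizer files are not in this commit, so the tokenizer comes
# from the t5-base base model.
from transformers import AutoTokenizer, TFT5ForConditionalGeneration

repo_id = "Michael-Vptn/text-summarization-t5-base"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = TFT5ForConditionalGeneration.from_pretrained(repo_id)  # reads tf_model.h5

text = "summarize: " + "Long article text ..."
inputs = tokenizer(text, return_tensors="tf", truncation=True, max_length=512)
summary_ids = model.generate(inputs.input_ids, max_length=200, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```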
config.json CHANGED
@@ -1,51 +1,60 @@
 {
+  "_name_or_path": "t5-base",
   "architectures": [
     "T5ForConditionalGeneration"
   ],
+  "classifier_dropout": 0.0,
   "d_ff": 3072,
   "d_kv": 64,
   "d_model": 768,
   "decoder_start_token_id": 0,
+  "dense_act_fn": "relu",
   "dropout_rate": 0.1,
   "eos_token_id": 1,
+  "feed_forward_proj": "relu",
   "initializer_factor": 1.0,
   "is_encoder_decoder": true,
+  "is_gated_act": false,
   "layer_norm_epsilon": 1e-06,
   "model_type": "t5",
   "n_positions": 512,
+  "num_decoder_layers": 12,
   "num_heads": 12,
   "num_layers": 12,
   "output_past": true,
   "pad_token_id": 0,
+  "relative_attention_max_distance": 128,
   "relative_attention_num_buckets": 32,
   "task_specific_params": {
     "summarization": {
-      "early_stopping": false,
-      "length_penalty": 1.0,
-      "max_length": 800,
-      "min_length": 50,
+      "early_stopping": true,
+      "length_penalty": 2.0,
+      "max_length": 200,
+      "min_length": 30,
       "no_repeat_ngram_size": 3,
       "num_beams": 4,
       "prefix": "summarize: "
     },
     "translation_en_to_de": {
-      "early_stopping": false,
-      "max_length": 800,
+      "early_stopping": true,
+      "max_length": 300,
       "num_beams": 4,
       "prefix": "translate English to German: "
     },
     "translation_en_to_fr": {
-      "early_stopping": false,
-      "max_length": 800,
+      "early_stopping": true,
+      "max_length": 300,
       "num_beams": 4,
       "prefix": "translate English to French: "
     },
     "translation_en_to_ro": {
-      "early_stopping": false,
-      "max_length": 800,
+      "early_stopping": true,
+      "max_length": 300,
       "num_beams": 4,
       "prefix": "translate English to Romanian: "
     }
   },
+  "transformers_version": "4.32.1",
+  "use_cache": true,
   "vocab_size": 32128
 }
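
The functional part of this config change is the updated `task_specific_params` for the summarization task. Below is a hedged sketch of how those values can be read from the uploaded config and passed straight to `generate()`; the repo id is again a hypothetical placeholder, and the tokenizer is taken from t5-base because no tokenizer files ship with this commit.

```python
# Sketch only: feeding the updated "summarization" task_specific_params from
# config.json into generate(). Repo id is a hypothetical placeholder.
from transformers import AutoTokenizer, T5Config, TFT5ForConditionalGeneration

repo_id = "Michael-Vptn/text-summarization-t5-base"  # assumption
config = T5Config.from_pretrained(repo_id)
params = dict(config.task_specific_params["summarization"])
prefix = params.pop("prefix")  # "summarize: "; the remaining keys are generate() kwargs

tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = TFT5ForConditionalGeneration.from_pretrained(repo_id)

inputs = tokenizer(prefix + "Long article text ...", return_tensors="tf", truncation=True)
summary_ids = model.generate(inputs.input_ids, **params)  # early_stopping, num_beams, length_penalty, ...
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```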
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "decoder_start_token_id": 0,
   "eos_token_id": 1,
   "pad_token_id": 0,
-  "transformers_version": "4.27.0.dev0"
+  "transformers_version": "4.32.1"
 }
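
Only the recorded `transformers_version` changes here. As a small illustrative sketch (repo id again hypothetical), the file can be inspected through `GenerationConfig`, which is available in the 4.32.x release recorded in the diff:

```python
# Sketch: inspecting the uploaded generation_config.json. Repo id is a
# hypothetical placeholder.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("Michael-Vptn/text-summarization-t5-base")
# Values from the diff above: decoder_start_token_id=0, eos_token_id=1, pad_token_id=0
print(gen_config.decoder_start_token_id, gen_config.eos_token_id, gen_config.pad_token_id)
```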
tf_model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9c5e6e98ff13b8af2aaefe070b943f29e055b3763cd0a63d72e94b0af8929471
+oid sha256:2b57fbdd2fa7f640fdc07abdcdc6424b823b1502042112add64b613e91eb0099
 size 1089544048
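
The tf_model.h5 entry is a Git LFS pointer, so the diff only shows its sha256 object id changing while the file size stays identical. Below is a sketch of verifying a downloaded copy against the new oid; `hf_hub_download` is from the huggingface_hub package, and the repo id is a hypothetical placeholder.

```python
# Sketch: hash-check the downloaded weights against the new LFS oid shown above.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download("Michael-Vptn/text-summarization-t5-base", "tf_model.h5")  # hypothetical repo id
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha256.update(chunk)
print(sha256.hexdigest())
# Expected: 2b57fbdd2fa7f640fdc07abdcdc6424b823b1502042112add64b613e91eb0099
```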