AlekseyKorshuk committed
Commit 4ab4ba6
1 Parent(s): 7cc603b

huggingartists

README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/sum-41")
 ```
 
-[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/18ckwm0s/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/3fy2kvn1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on Sum 41's lyrics.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/1rj7iazy) for full transparency and reproducibility.
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/2hgx7kne) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/1rj7iazy/artifacts) is logged and versioned.
+At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/2hgx7kne/artifacts) is logged and versioned.
 
 ## How to use
 
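For reference, the "How to use" section being updated here follows the standard huggingartists pattern: load the lyrics dataset and run the fine-tuned checkpoint through a text-generation pipeline. A minimal sketch (the repo id `huggingartists/sum-41` comes from the diff above; the `pipeline` call is the usual transformers API, not code from this commit):

```python
from datasets import load_dataset
from transformers import pipeline

# Lyrics dataset referenced in the README snippet above.
dataset = load_dataset("huggingartists/sum-41")

# Generate lyrics with the fine-tuned GPT-2 checkpoint in this repo.
generator = pipeline("text-generation", model="huggingartists/sum-41")
print(generator("I am", num_return_sequences=3))
```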
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "gpt2",
+  "_name_or_path": "sum-41",
   "activation_function": "gelu_new",
   "architectures": [
     "GPT2LMHeadModel"
@@ -17,7 +17,9 @@
   "n_inner": null,
   "n_layer": 12,
   "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
@@ -34,7 +36,7 @@
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.11.3",
+  "transformers_version": "4.16.2",
   "use_cache": true,
   "vocab_size": 50257
 }
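The two added keys do not change this model's behavior: `reorder_and_upcast_attn` and `scale_attn_by_inverse_layer_idx` are `GPT2Config` attributes introduced in later transformers releases (both default to `false`), so re-saving under 4.16.2 serializes fields that 4.11.3 never wrote. A quick sanity check, assuming the published repo id:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("huggingartists/sum-41")
# Both attention-stability flags should come back False, matching the diff.
print(config.reorder_and_upcast_attn, config.scale_attn_by_inverse_layer_idx)
```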
evaluation.txt CHANGED
@@ -1 +1 @@
-{"eval_loss": 2.928959369659424, "eval_runtime": 1.264, "eval_samples_per_second": 20.57, "eval_steps_per_second": 3.165, "epoch": 1.0}
+{"eval_loss": 2.578360080718994, "eval_runtime": 1.3642, "eval_samples_per_second": 20.526, "eval_steps_per_second": 2.932, "epoch": 11.0}
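Because `eval_loss` for a causal LM is the mean token-level cross-entropy, the change reads more intuitively as perplexity: roughly exp(2.929) ≈ 18.7 for the old 1-epoch checkpoint versus exp(2.578) ≈ 13.2 after the longer run.

```python
import math

# Perplexity = exp(mean cross-entropy) for a causal language model.
print(math.exp(2.928959369659424))  # ~18.71, old checkpoint (epoch 1)
print(math.exp(2.578360080718994))  # ~13.18, new checkpoint (11-epoch run)
```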
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f35e4bc2c2893f5e3fcec13df7ba6c6cebae70ea24bac23a46ece291f58ffe2e
+oid sha256:3cb5eb5a9606f4614f22fa3b50a61f459a02d4b622e5ad82345fce00fe13980d
 size 497764120
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6bda1875ee51eba220fc899c04abb3275854178f035fe39897630388145cad8b
+oid sha256:63d6f27da54aff7f05f32711aa0ca1993ed6674681766ddf8ad9ddffe0163883
 size 995603825
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ce8da8c94f29bb06679607e8ffcf5182ea0701f672be8aa2a0e647e140062778
+oid sha256:41148ab2cccbd2659c86acc8286fe888d6cde6c328be53f18c2730011587797b
 size 510403817
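This file, like the other binary artifacts in the commit, is stored as a Git LFS pointer: the size is unchanged but the sha256 oid differs, i.e. the tensors themselves were retrained. To fetch the resolved weights rather than the three-line pointer, the standard hub helper works (a sketch; assumes `huggingface_hub` is installed):

```python
from huggingface_hub import hf_hub_download

# Downloads the actual ~510 MB weights file, not the LFS pointer text.
path = hf_hub_download(repo_id="huggingartists/sum-41", filename="pytorch_model.bin")
print(path)
```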
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2eede3ee5ff25e9002bcd2fb29039533bffed5336005be0b71df1ad1072371bc
+oid sha256:8634c46781e97e9657fc0ec952335eb8c634c60e1a03871907657b503158b7d7
 size 14503
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d8130029718e3533ddde75a2c06f96da583a4d80cbf252f6ba4e6fe855637e8
+oid sha256:2d1ac50aae75de09c176668acc47db9824dd3efe4a9761503e54948712843f45
 size 623
tokenizer.json CHANGED
The diff for this file is too large to render.
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "GPT2Tokenizer"}
+{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "huggingartists/sum-41", "tokenizer_class": "GPT2Tokenizer"}
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "best_metric": 2.928959369659424,
-  "best_model_checkpoint": "output/sum-41/checkpoint-18",
-  "epoch": 1.0,
-  "global_step": 18,
+  "best_metric": 2.578360080718994,
+  "best_model_checkpoint": "output/sum-41/checkpoint-108",
+  "epoch": 6.0,
+  "global_step": 108,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -32,11 +32,159 @@
       "eval_samples_per_second": 22.691,
       "eval_steps_per_second": 3.491,
       "step": 18
+    },
+    {
+      "epoch": 1.11,
+      "learning_rate": 4.137086214086682e-06,
+      "loss": 2.7011,
+      "step": 20
+    },
+    {
+      "epoch": 1.39,
+      "learning_rate": 4.513741816785908e-05,
+      "loss": 2.8934,
+      "step": 25
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 0.00010290000000000001,
+      "loss": 2.793,
+      "step": 30
+    },
+    {
+      "epoch": 1.94,
+      "learning_rate": 0.00013615781185663748,
+      "loss": 2.7355,
+      "step": 35
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 2.6775753498077393,
+      "eval_runtime": 1.2274,
+      "eval_samples_per_second": 22.813,
+      "eval_steps_per_second": 3.259,
+      "step": 36
+    },
+    {
+      "epoch": 2.22,
+      "learning_rate": 0.0001211506487979619,
+      "loss": 2.6552,
+      "step": 40
+    },
+    {
+      "epoch": 2.5,
+      "learning_rate": 6.860000000000001e-05,
+      "loss": 2.4943,
+      "step": 45
+    },
+    {
+      "epoch": 2.78,
+      "learning_rate": 1.6049351202038163e-05,
+      "loss": 2.2804,
+      "step": 50
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 2.632376194000244,
+      "eval_runtime": 1.2528,
+      "eval_samples_per_second": 22.349,
+      "eval_steps_per_second": 3.193,
+      "step": 54
+    },
+    {
+      "epoch": 3.06,
+      "learning_rate": 1.0421881433625223e-06,
+      "loss": 2.6341,
+      "step": 55
+    },
+    {
+      "epoch": 3.33,
+      "learning_rate": 3.4300000000000014e-05,
+      "loss": 2.3676,
+      "step": 60
+    },
+    {
+      "epoch": 3.61,
+      "learning_rate": 9.206258183214083e-05,
+      "loss": 2.2182,
+      "step": 65
+    },
+    {
+      "epoch": 3.89,
+      "learning_rate": 0.00013306291378591332,
+      "loss": 2.4212,
+      "step": 70
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 2.5963516235351562,
+      "eval_runtime": 1.2572,
+      "eval_samples_per_second": 22.272,
+      "eval_steps_per_second": 3.182,
+      "step": 72
+    },
+    {
+      "epoch": 4.17,
+      "learning_rate": 0.00012800934269961248,
+      "loss": 2.2587,
+      "step": 75
+    },
+    {
+      "epoch": 4.44,
+      "learning_rate": 8.051226498795145e-05,
+      "loss": 2.1767,
+      "step": 80
+    },
+    {
+      "epoch": 4.72,
+      "learning_rate": 2.4504769975503385e-05,
+      "loss": 2.1842,
+      "step": 85
+    },
+    {
+      "epoch": 5.0,
+      "learning_rate": 0.0,
+      "loss": 2.2178,
+      "step": 90
+    },
+    {
+      "epoch": 5.0,
+      "eval_loss": 2.578657388687134,
+      "eval_runtime": 1.2377,
+      "eval_samples_per_second": 22.622,
+      "eval_steps_per_second": 3.232,
+      "step": 90
+    },
+    {
+      "epoch": 5.28,
+      "learning_rate": 2.4504769975503317e-05,
+      "loss": 2.0438,
+      "step": 95
+    },
+    {
+      "epoch": 5.56,
+      "learning_rate": 8.051226498795124e-05,
+      "loss": 2.1724,
+      "step": 100
+    },
+    {
+      "epoch": 5.83,
+      "learning_rate": 0.00012800934269961248,
+      "loss": 1.9973,
+      "step": 105
+    },
+    {
+      "epoch": 6.0,
+      "eval_loss": 2.578360080718994,
+      "eval_runtime": 1.2443,
+      "eval_samples_per_second": 22.503,
+      "eval_steps_per_second": 3.215,
+      "step": 108
     }
   ],
-  "max_steps": 18,
-  "num_train_epochs": 1,
-  "total_flos": 18682380288000.0,
+  "max_steps": 198,
+  "num_train_epochs": 11,
+  "total_flos": 110787821568000.0,
   "trial_name": null,
   "trial_params": null
 }
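The updated bookkeeping is internally consistent with an 18-step epoch (the previous run stopped at `global_step` 18 after 1 epoch): epoch 6.0 lands exactly on step 108, and the new 11-epoch budget gives `max_steps` 198.

```python
steps_per_epoch = 18           # the original 1-epoch run ended at step 18
print(6 * steps_per_epoch)     # 108 -> "global_step" at "epoch": 6.0
print(11 * steps_per_epoch)    # 198 -> "max_steps" for "num_train_epochs": 11
```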
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8591fbca38538cc0025afd8c45498ffca953cc8e2e6e0eb814c791f923ccca24
-size 2863
+oid sha256:209473ae5c5612ab3bd6531c165705ecc7cd87270e61a818a0765da5d7b04b55
+size 3055
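The size bump (2863 → 3055 bytes) is plausible fallout from the transformers upgrade, since `training_args.bin` is just the pickled `TrainingArguments` and 4.16.2 defines more fields than 4.11.3. To inspect the concrete hyperparameters behind the W&B links above, the file can be unpickled directly (a sketch; requires a transformers install compatible with the saving version):

```python
import torch

# training_args.bin is a torch-pickled transformers.TrainingArguments object.
args = torch.load("training_args.bin")
print(args.num_train_epochs, args.learning_rate)
```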