giannisan committed on
Commit 59d62fa
1 Parent(s): 70daf29

Training in progress, epoch 0

.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,64 @@
+ ---
+ license: cc-by-nc-4.0
+ base_model: facebook/timesformer-base-finetuned-k400
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: timesformer-base-finetuned-k400-finetuned-ucf101-subset
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # timesformer-base-finetuned-k400-finetuned-ucf101-subset
+
+ This model is a fine-tuned version of [facebook/timesformer-base-finetuned-k400](https://huggingface.co/facebook/timesformer-base-finetuned-k400) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0878
+ - Accuracy: 1.0
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 6
+ - eval_batch_size: 6
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - training_steps: 200
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 0.1523        | 0.25  | 50   | 0.0735          | 1.0      |
+ | 0.024         | 1.25  | 100  | 0.0470          | 0.9866   |
+ | 0.0583        | 2.25  | 150  | 0.0302          | 0.9866   |
+ | 0.0036        | 3.25  | 200  | 0.0297          | 0.9866   |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.2
+ - Pytorch 2.1.0+cu118
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
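
For reference, here is how the hyperparameters listed in the added model card map onto `transformers.TrainingArguments`. This is a minimal sketch, not the script behind this commit: `output_dir` and the evaluation cadence are assumptions (the trainer state below logs an evaluation every 50 steps), while the Adam settings are the `Trainer` defaults and need no explicit flag.

```python
from transformers import TrainingArguments

# Sketch of the reported configuration (Transformers 4.40.2).
# output_dir and evaluation_strategy are assumptions; the rest mirrors the
# hyperparameter list in the model card. Adam(betas=(0.9, 0.999), eps=1e-8)
# is already the Trainer default optimizer.
args = TrainingArguments(
    output_dir="timesformer-base-finetuned-k400-finetuned-ucf101-subset",  # assumed
    learning_rate=5e-5,
    per_device_train_batch_size=6,
    per_device_eval_batch_size=6,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    max_steps=200,                # "training_steps: 200"
    logging_steps=10,             # matches "logging_steps": 10 in trainer_state.json
    evaluation_strategy="epoch",  # assumption: evals appear every 50 steps, once per pass
)
```

The fractional epoch values in the logs (0.25, 1.25, ...) and the sentinel `num_train_epochs` in trainer_state.json are consistent with training being driven by `max_steps` rather than an epoch count.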
.ipynb_checkpoints/all_results-checkpoint.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.25,
+   "eval_accuracy": 1.0,
+   "eval_loss": 0.08780594170093536,
+   "eval_runtime": 76.2242,
+   "eval_samples_per_second": 4.552,
+   "eval_steps_per_second": 0.761
+ }
.ipynb_checkpoints/test_results-checkpoint.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.25,
+   "eval_accuracy": 1.0,
+   "eval_loss": 0.08780594170093536,
+   "eval_runtime": 76.2242,
+   "eval_samples_per_second": 4.552,
+   "eval_steps_per_second": 0.761
+ }
.ipynb_checkpoints/trainer_state-checkpoint.json ADDED
@@ -0,0 +1,224 @@
+ {
+   "best_metric": 1.0,
+   "best_model_checkpoint": "timesformer-base-finetuned-k400-finetuned-ucf101-subset/checkpoint-50",
+   "epoch": 3.25,
+   "eval_steps": 500,
+   "global_step": 200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "grad_norm": 14.361767768859863,
+       "learning_rate": 2.5e-05,
+       "loss": 2.3816,
+       "step": 10
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 8.448980331420898,
+       "learning_rate": 5e-05,
+       "loss": 1.5686,
+       "step": 20
+     },
+     {
+       "epoch": 0.15,
+       "grad_norm": 7.0012526512146,
+       "learning_rate": 4.722222222222222e-05,
+       "loss": 0.6191,
+       "step": 30
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 4.961479187011719,
+       "learning_rate": 4.4444444444444447e-05,
+       "loss": 0.3468,
+       "step": 40
+     },
+     {
+       "epoch": 0.25,
+       "grad_norm": 2.13403058052063,
+       "learning_rate": 4.166666666666667e-05,
+       "loss": 0.1523,
+       "step": 50
+     },
+     {
+       "epoch": 0.25,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.07354684174060822,
+       "eval_runtime": 34.9344,
+       "eval_samples_per_second": 4.265,
+       "eval_steps_per_second": 0.716,
+       "step": 50
+     },
+     {
+       "epoch": 1.05,
+       "grad_norm": 0.933645486831665,
+       "learning_rate": 3.888888888888889e-05,
+       "loss": 0.0521,
+       "step": 60
+     },
+     {
+       "epoch": 1.1,
+       "grad_norm": 0.865109920501709,
+       "learning_rate": 3.611111111111111e-05,
+       "loss": 0.0226,
+       "step": 70
+     },
+     {
+       "epoch": 1.15,
+       "grad_norm": 0.17869924008846283,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.108,
+       "step": 80
+     },
+     {
+       "epoch": 1.2,
+       "grad_norm": 0.23531177639961243,
+       "learning_rate": 3.055555555555556e-05,
+       "loss": 0.0107,
+       "step": 90
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 0.1640271246433258,
+       "learning_rate": 2.777777777777778e-05,
+       "loss": 0.024,
+       "step": 100
+     },
+     {
+       "epoch": 1.25,
+       "eval_accuracy": 0.9865771812080537,
+       "eval_loss": 0.046999599784612656,
+       "eval_runtime": 32.3832,
+       "eval_samples_per_second": 4.601,
+       "eval_steps_per_second": 0.772,
+       "step": 100
+     },
+     {
+       "epoch": 2.05,
+       "grad_norm": 0.47139155864715576,
+       "learning_rate": 2.5e-05,
+       "loss": 0.0056,
+       "step": 110
+     },
+     {
+       "epoch": 2.1,
+       "grad_norm": 0.11248086392879486,
+       "learning_rate": 2.2222222222222223e-05,
+       "loss": 0.0045,
+       "step": 120
+     },
+     {
+       "epoch": 2.15,
+       "grad_norm": 0.07324650138616562,
+       "learning_rate": 1.9444444444444445e-05,
+       "loss": 0.0042,
+       "step": 130
+     },
+     {
+       "epoch": 2.2,
+       "grad_norm": 0.46895766258239746,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.0202,
+       "step": 140
+     },
+     {
+       "epoch": 2.25,
+       "grad_norm": 0.048765964806079865,
+       "learning_rate": 1.388888888888889e-05,
+       "loss": 0.0583,
+       "step": 150
+     },
+     {
+       "epoch": 2.25,
+       "eval_accuracy": 0.9865771812080537,
+       "eval_loss": 0.030154313892126083,
+       "eval_runtime": 32.7398,
+       "eval_samples_per_second": 4.551,
+       "eval_steps_per_second": 0.764,
+       "step": 150
+     },
+     {
+       "epoch": 3.05,
+       "grad_norm": 0.11046731472015381,
+       "learning_rate": 1.1111111111111112e-05,
+       "loss": 0.0032,
+       "step": 160
+     },
+     {
+       "epoch": 3.1,
+       "grad_norm": 0.07638247311115265,
+       "learning_rate": 8.333333333333334e-06,
+       "loss": 0.0027,
+       "step": 170
+     },
+     {
+       "epoch": 3.15,
+       "grad_norm": 0.0314478874206543,
+       "learning_rate": 5.555555555555556e-06,
+       "loss": 0.0053,
+       "step": 180
+     },
+     {
+       "epoch": 3.2,
+       "grad_norm": 0.23283490538597107,
+       "learning_rate": 2.777777777777778e-06,
+       "loss": 0.0032,
+       "step": 190
+     },
+     {
+       "epoch": 3.25,
+       "grad_norm": 0.3815344274044037,
+       "learning_rate": 0.0,
+       "loss": 0.0036,
+       "step": 200
+     },
+     {
+       "epoch": 3.25,
+       "eval_accuracy": 0.9865771812080537,
+       "eval_loss": 0.02967873215675354,
+       "eval_runtime": 32.6048,
+       "eval_samples_per_second": 4.57,
+       "eval_steps_per_second": 0.767,
+       "step": 200
+     },
+     {
+       "epoch": 3.25,
+       "step": 200,
+       "total_flos": 1.0514301109272576e+18,
+       "train_loss": 0.2698267618194222,
+       "train_runtime": 1064.2955,
+       "train_samples_per_second": 1.128,
+       "train_steps_per_second": 0.188
+     },
+     {
+       "epoch": 3.25,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.08780594170093536,
+       "eval_runtime": 86.4118,
+       "eval_samples_per_second": 4.016,
+       "eval_steps_per_second": 0.671,
+       "step": 200
+     },
+     {
+       "epoch": 3.25,
+       "eval_accuracy": 1.0,
+       "eval_loss": 0.08780594170093536,
+       "eval_runtime": 76.2242,
+       "eval_samples_per_second": 4.552,
+       "eval_steps_per_second": 0.761,
+       "step": 200
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 200,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 9223372036854775807,
+   "save_steps": 500,
+   "total_flos": 1.0514301109272576e+18,
+   "train_batch_size": 6,
+   "trial_name": null,
+   "trial_params": null
+ }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9376ab61570e92ddbc75657778a029c7977423294333a9d8b7544c679041f40c
+ oid sha256:ba7066ff06eb97588768c310562feeabbebd4dae8f1b19069cb3365f54df0caa
  size 485096872
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0976022ff1e3e4b6c6ed3d4b9f4fbf236be50b91b6fc1edad006409b3e04fd11
+ oid sha256:0227df48b0d08dff9e4610c246ea0c33f3bdb212835acc291f35432a21b34467
  size 5048
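
The model.safetensors and training_args.bin changes above are LFS pointer updates only. For completeness, a minimal sketch of loading the fine-tuned checkpoint for video classification with `transformers`; the repo id and the 8-frame, 224x224 input shape are assumptions carried over from the TimeSformer base model, not something this commit specifies.

```python
import numpy as np
import torch
from transformers import AutoImageProcessor, TimesformerForVideoClassification

# Assumed repo id for this fine-tuned checkpoint; adjust to the actual namespace.
ckpt = "giannisan/timesformer-base-finetuned-k400-finetuned-ucf101-subset"

# Reuse the base model's image processor (assumed 8 frames at 224x224).
processor = AutoImageProcessor.from_pretrained("facebook/timesformer-base-finetuned-k400")
model = TimesformerForVideoClassification.from_pretrained(ckpt)

# Dummy clip: a list of 8 RGB frames; replace with real decoded video frames.
video = list(np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8))

inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map the top logit back to the label set stored in the checkpoint's config.
print(model.config.id2label[int(logits.argmax(-1))])
```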