dima806 committed
Commit 4340d83
1 Parent(s): aedf551

Upload folder using huggingface_hub

checkpoint-7623/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:62dfc8be86043484d9eaf20856f78629ee851e733dba965646beada2aa0a8e49
+ oid sha256:342fd0d652045819eaa7b50802aa55cd6efea8c969aea62fbb0d9f9dd2627d34
  size 343365480
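
The changed weight and optimizer files are Git LFS pointers: each pointer records only the spec version, the sha256 oid of the real blob, and its size in bytes. Below is a minimal sketch, assuming the blob has already been fetched locally (e.g. via `git lfs pull`) and using an illustrative file path, that parses a pointer and checks the downloaded file against the recorded oid and size:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_text: str) -> dict:
    # Each pointer line is "key value"; keep the oid digest and the size.
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(blob_path: Path, pointer: dict) -> bool:
    # Compare the actual byte count and sha256 digest against the pointer metadata.
    data = blob_path.read_bytes()
    return len(data) == pointer["size"] and hashlib.sha256(data).hexdigest() == pointer["oid"]

# Hypothetical usage: the pointer text mirrors the new version of
# checkpoint-7623/model.safetensors introduced by this commit.
pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:342fd0d652045819eaa7b50802aa55cd6efea8c969aea62fbb0d9f9dd2627d34\n"
    "size 343365480\n"
)
print(verify_blob(Path("checkpoint-7623/model.safetensors"), pointer))
```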
checkpoint-7623/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0cb453adcd761222b4113170fb483b5b9b6837b2124004e76b7761389266edcb
+ oid sha256:9db7852739f634f86458593aa2ef21f3d60a69626c2715f02f5f900b8a863074
  size 686851461
checkpoint-7623/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
  {
- "best_metric": 1.3125017881393433,
+ "best_metric": 1.2215194702148438,
  "best_model_checkpoint": "cat_breed_image_detection/checkpoint-7623",
  "epoch": 3.0,
  "eval_steps": 500,
@@ -10,137 +10,137 @@
  "log_history": [
  {
  "epoch": 0.19677292404565133,
- "grad_norm": 7.071441173553467,
+ "grad_norm": 13.002593994140625,
  "learning_rate": 8.465205334741845e-07,
- "loss": 1.2994,
+ "loss": 1.2055,
  "step": 500
  },
  {
  "epoch": 0.39354584809130266,
- "grad_norm": 9.168732643127441,
+ "grad_norm": 9.049211502075195,
  "learning_rate": 7.870989040010564e-07,
- "loss": 1.3024,
+ "loss": 1.1934,
  "step": 1000
  },
  {
  "epoch": 0.5903187721369539,
- "grad_norm": 8.262181282043457,
+ "grad_norm": 13.344831466674805,
  "learning_rate": 7.276772745279281e-07,
- "loss": 1.2652,
+ "loss": 1.179,
  "step": 1500
  },
  {
  "epoch": 0.7870916961826053,
- "grad_norm": 10.966161727905273,
+ "grad_norm": 11.866278648376465,
  "learning_rate": 6.682556450547999e-07,
- "loss": 1.2789,
+ "loss": 1.1771,
  "step": 2000
  },
  {
  "epoch": 0.9838646202282566,
- "grad_norm": 8.47767448425293,
+ "grad_norm": 8.955316543579102,
  "learning_rate": 6.088340155816718e-07,
- "loss": 1.269,
+ "loss": 1.1718,
  "step": 2500
  },
  {
  "epoch": 1.0,
- "eval_accuracy": 0.6525409654561559,
- "eval_loss": 1.3324801921844482,
- "eval_model_preparation_time": 0.0054,
- "eval_runtime": 1508.0216,
- "eval_samples_per_second": 71.872,
- "eval_steps_per_second": 2.246,
+ "eval_accuracy": 0.6744261145556539,
+ "eval_loss": 1.2367281913757324,
+ "eval_model_preparation_time": 0.0053,
+ "eval_runtime": 1198.4464,
+ "eval_samples_per_second": 90.437,
+ "eval_steps_per_second": 2.826,
  "step": 2541
  },
  {
  "epoch": 1.1806375442739079,
- "grad_norm": 14.689343452453613,
+ "grad_norm": 8.82983684539795,
  "learning_rate": 5.494123861085435e-07,
- "loss": 1.2627,
+ "loss": 1.1671,
  "step": 3000
  },
  {
  "epoch": 1.3774104683195592,
- "grad_norm": 7.966395854949951,
+ "grad_norm": 10.22899055480957,
  "learning_rate": 4.899907566354153e-07,
- "loss": 1.2615,
+ "loss": 1.1673,
  "step": 3500
  },
  {
  "epoch": 1.5741833923652107,
- "grad_norm": 6.865269184112549,
+ "grad_norm": 7.556177139282227,
  "learning_rate": 4.3056912716228705e-07,
- "loss": 1.2636,
+ "loss": 1.182,
  "step": 4000
  },
  {
  "epoch": 1.770956316410862,
- "grad_norm": 9.544075012207031,
+ "grad_norm": 7.892594814300537,
  "learning_rate": 3.7114749768915883e-07,
- "loss": 1.2484,
+ "loss": 1.1562,
  "step": 4500
  },
  {
  "epoch": 1.9677292404565132,
- "grad_norm": 12.453811645507812,
+ "grad_norm": 7.493433952331543,
  "learning_rate": 3.117258682160306e-07,
- "loss": 1.264,
+ "loss": 1.1524,
  "step": 5000
  },
  {
  "epoch": 2.0,
- "eval_accuracy": 0.6554196191319752,
- "eval_loss": 1.3183132410049438,
- "eval_model_preparation_time": 0.0054,
- "eval_runtime": 1205.014,
- "eval_samples_per_second": 89.944,
- "eval_steps_per_second": 2.811,
+ "eval_accuracy": 0.67734167404783,
+ "eval_loss": 1.2256686687469482,
+ "eval_model_preparation_time": 0.0053,
+ "eval_runtime": 1196.6469,
+ "eval_samples_per_second": 90.573,
+ "eval_steps_per_second": 2.83,
  "step": 5082
  },
  {
  "epoch": 2.1645021645021645,
- "grad_norm": 9.422158241271973,
+ "grad_norm": 7.134050369262695,
  "learning_rate": 2.523042387429024e-07,
- "loss": 1.2342,
+ "loss": 1.1482,
  "step": 5500
  },
  {
  "epoch": 2.3612750885478158,
- "grad_norm": 8.495936393737793,
+ "grad_norm": 11.738887786865234,
  "learning_rate": 1.928826092697742e-07,
- "loss": 1.2448,
+ "loss": 1.1562,
  "step": 6000
  },
  {
  "epoch": 2.558048012593467,
- "grad_norm": 8.68142318725586,
+ "grad_norm": 8.556049346923828,
  "learning_rate": 1.3346097979664598e-07,
- "loss": 1.2507,
+ "loss": 1.1478,
  "step": 6500
  },
  {
  "epoch": 2.7548209366391183,
- "grad_norm": 11.802330017089844,
+ "grad_norm": 7.244868755340576,
  "learning_rate": 7.403935032351777e-08,
- "loss": 1.2448,
+ "loss": 1.1521,
  "step": 7000
  },
  {
  "epoch": 2.9515938606847696,
- "grad_norm": 8.9815673828125,
+ "grad_norm": 10.283246040344238,
  "learning_rate": 1.4617720850389543e-08,
- "loss": 1.2538,
+ "loss": 1.1567,
  "step": 7500
  },
  {
  "epoch": 3.0,
- "eval_accuracy": 0.6566928697962799,
- "eval_loss": 1.3125017881393433,
- "eval_model_preparation_time": 0.0054,
- "eval_runtime": 1235.4205,
- "eval_samples_per_second": 87.73,
- "eval_steps_per_second": 2.742,
+ "eval_accuracy": 0.6776645999409507,
+ "eval_loss": 1.2215194702148438,
+ "eval_model_preparation_time": 0.0053,
+ "eval_runtime": 1204.575,
+ "eval_samples_per_second": 89.977,
+ "eval_steps_per_second": 2.812,
  "step": 7623
  }
  ],
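
trainer_state.json is the training log written by the Hugging Face Trainer; best_metric, best_model_checkpoint, and log_history are its standard fields. A small sketch, assuming a local checkout of this repository so the relative path resolves, that loads the state and prints the per-epoch evaluation records visible in this diff:

```python
import json

# Load the Trainer state from a local checkout (path assumed).
with open("checkpoint-7623/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Entries containing eval_* keys are the end-of-epoch evaluation records;
# the others are the step-level training logs (loss, grad_norm, learning_rate).
for record in state["log_history"]:
    if "eval_loss" in record:
        print(
            f"epoch {record['epoch']:.1f}: "
            f"eval_loss={record['eval_loss']:.4f}, "
            f"eval_accuracy={record['eval_accuracy']:.4f}"
        )
```

On the new state in this commit, that loop would report eval_loss falling from 1.2367 at epoch 1 to 1.2215 at epoch 3, with eval_accuracy rising to 0.6777, which matches the updated best_metric above.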
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f3a3b372e31df38748987435df2ea69465e5e4c83db458e0fac6563ddd192b1e
+ oid sha256:342fd0d652045819eaa7b50802aa55cd6efea8c969aea62fbb0d9f9dd2627d34
  size 343365480
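
model.safetensors holds the model weights in the safetensors format. A minimal sketch, assuming the `safetensors` package is installed and the LFS blob has been fetched, that inspects the stored tensors without instantiating the model:

```python
from safetensors import safe_open

# Open the weight file lazily; tensors are only materialized when requested.
with safe_open("model.safetensors", framework="pt") as f:
    names = list(f.keys())
    total = sum(f.get_tensor(name).numel() for name in names)
    print(f"{len(names)} tensors, {total:,} parameters")
```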
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c6e2b24c2bb418666641ee5942c486949ee0d8efdf7de643fb67fbd8451ce35
+ oid sha256:2ddce706ee826114a81e5dc98c1b4a675a40b3cf07bb0062afea81a5ff747285
  size 4667
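
training_args.bin is a pickled TrainingArguments object saved by the Trainer, so it can be inspected with torch.load. A sketch under that assumption; weights_only=False is required on recent PyTorch versions because the file is a full pickle rather than a tensor archive, and the attribute names shown are standard TrainingArguments fields:

```python
import torch

# training_args.bin is a pickled transformers.TrainingArguments instance.
# Only unpickle it if you trust the repository: full unpickling can run arbitrary code.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```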