NeuronZero committed
Commit ab95481
Parent: aef9619

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,32 @@
+
+ ---
+ tags:
+ - autotrain
+ - image-classification
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
+   example_title: Tiger
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
+   example_title: Teapot
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
+   example_title: Palace
+ datasets:
+ - CXR-Classifier/autotrain-data
+ ---
+
+ # Model Trained Using AutoTrain
+
+ - Problem type: Image Classification
+
+ ## Validation Metrics
+ loss: 0.1180819422006607
+
+ f1: 0.9755686604886269
+
+ precision: 0.9780405405405406
+
+ recall: 0.973109243697479
+
+ auc: 0.9916270580630442
+
+ accuracy: 0.9644607843137255
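The model card above reports validation metrics but no usage snippet. A minimal inference sketch, assuming the published repo id `NeuronZero/CXR-Classifier` (it appears in `training_params.json` at the bottom of this commit) and a hypothetical local image file `xray.jpg`:

```python
# Minimal inference sketch. "xray.jpg" is a hypothetical local file; the repo id
# comes from training_params.json in this commit.
# Requires: pip install transformers torch pillow
from transformers import pipeline

classifier = pipeline("image-classification", model="NeuronZero/CXR-Classifier")
preds = classifier("xray.jpg")  # also accepts a PIL.Image or an image URL
print(preds)  # e.g. [{"label": "PNEUMONIA", "score": ...}, {"label": "NORMAL", "score": ...}]
```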
checkpoint-1224/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "_num_labels": 2,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NORMAL",
+     "1": "PNEUMONIA"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NORMAL": 0,
+     "PNEUMONIA": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0"
+ }
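The `id2label` / `label2id` maps in this config (the same config is duplicated at the repo root below) are what turn the model's two-way logits into class names. A sketch of the manual path, without the pipeline helper, under the same assumptions as above (hypothetical `xray.jpg`):

```python
# Sketch: map logits to class names via the id2label table in this config.
# "xray.jpg" is a hypothetical input file.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("NeuronZero/CXR-Classifier")
model = AutoModelForImageClassification.from_pretrained("NeuronZero/CXR-Classifier")

inputs = processor(images=Image.open("xray.jpg").convert("RGB"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits      # shape (1, 2): indices 0=NORMAL, 1=PNEUMONIA
pred_id = logits.argmax(-1).item()
print(model.config.id2label[pred_id])    # "NORMAL" or "PNEUMONIA"
```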
checkpoint-1224/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80584abcc67d75cd6b1bb645f5ea036d18ddcdef533578238eb8283f6cfc5f15
+ size 343223968
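This three-line file is a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 digest of the real `model.safetensors` and `size` is its byte length, so a downloaded copy can be checked against the pointer. A small verification sketch (the local path is hypothetical; digest and size are the ones in the pointer above):

```python
# Sketch: verify a downloaded model.safetensors against its LFS pointer.
import hashlib
import os

EXPECTED_OID = "80584abcc67d75cd6b1bb645f5ea036d18ddcdef533578238eb8283f6cfc5f15"
EXPECTED_SIZE = 343223968
path = "model.safetensors"  # hypothetical local path

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("LFS pointer verified")
```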
checkpoint-1224/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d72869a1789240ec793fc99d9052d02e4045c77da432a56e37b2a712c0cd97c
+ size 686563258
checkpoint-1224/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:201277254cf0be48903edd44f2819445010c7718a2efa3c34342549b1428bddc
+ size 13926
checkpoint-1224/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6a75058089711ac1cb44d3cfbe67bc4fc5de734191fe64c892a0c6a5988b115
+ size 1064
checkpoint-1224/trainer_state.json ADDED
@@ -0,0 +1,487 @@
+ {
+   "best_metric": 0.1180819422006607,
+   "best_model_checkpoint": "CXR-Classifier/checkpoint-1224",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 1224,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.05,
+       "grad_norm": 3.592426061630249,
+       "learning_rate": 8.130081300813009e-06,
+       "loss": 0.5972,
+       "step": 20
+     },
+     {
+       "epoch": 0.1,
+       "grad_norm": 2.819566249847412,
+       "learning_rate": 1.6260162601626018e-05,
+       "loss": 0.4976,
+       "step": 40
+     },
+     {
+       "epoch": 0.15,
+       "grad_norm": 5.789632320404053,
+       "learning_rate": 2.4390243902439026e-05,
+       "loss": 0.3321,
+       "step": 60
+     },
+     {
+       "epoch": 0.2,
+       "grad_norm": 5.508607864379883,
+       "learning_rate": 3.2520325203252037e-05,
+       "loss": 0.415,
+       "step": 80
+     },
+     {
+       "epoch": 0.25,
+       "grad_norm": 7.562315464019775,
+       "learning_rate": 4.065040650406504e-05,
+       "loss": 0.2412,
+       "step": 100
+     },
+     {
+       "epoch": 0.29,
+       "grad_norm": 4.414723873138428,
+       "learning_rate": 4.878048780487805e-05,
+       "loss": 0.3456,
+       "step": 120
+     },
+     {
+       "epoch": 0.34,
+       "grad_norm": 2.0423896312713623,
+       "learning_rate": 4.922797456857402e-05,
+       "loss": 0.2415,
+       "step": 140
+     },
+     {
+       "epoch": 0.39,
+       "grad_norm": 2.041806221008301,
+       "learning_rate": 4.83197093551317e-05,
+       "loss": 0.3546,
+       "step": 160
+     },
+     {
+       "epoch": 0.44,
+       "grad_norm": 1.6937503814697266,
+       "learning_rate": 4.741144414168938e-05,
+       "loss": 0.3947,
+       "step": 180
+     },
+     {
+       "epoch": 0.49,
+       "grad_norm": 6.492763996124268,
+       "learning_rate": 4.650317892824705e-05,
+       "loss": 0.3063,
+       "step": 200
+     },
+     {
+       "epoch": 0.54,
+       "grad_norm": 1.9708950519561768,
+       "learning_rate": 4.559491371480473e-05,
+       "loss": 0.3115,
+       "step": 220
+     },
+     {
+       "epoch": 0.59,
+       "grad_norm": 12.533012390136719,
+       "learning_rate": 4.46866485013624e-05,
+       "loss": 0.5087,
+       "step": 240
+     },
+     {
+       "epoch": 0.64,
+       "grad_norm": 8.02456283569336,
+       "learning_rate": 4.377838328792008e-05,
+       "loss": 0.2745,
+       "step": 260
+     },
+     {
+       "epoch": 0.69,
+       "grad_norm": 1.0878229141235352,
+       "learning_rate": 4.287011807447775e-05,
+       "loss": 0.1905,
+       "step": 280
+     },
+     {
+       "epoch": 0.74,
+       "grad_norm": 7.465769290924072,
+       "learning_rate": 4.196185286103542e-05,
+       "loss": 0.2509,
+       "step": 300
+     },
+     {
+       "epoch": 0.78,
+       "grad_norm": 15.646003723144531,
+       "learning_rate": 4.10535876475931e-05,
+       "loss": 0.4353,
+       "step": 320
+     },
+     {
+       "epoch": 0.83,
+       "grad_norm": 3.2481565475463867,
+       "learning_rate": 4.014532243415077e-05,
+       "loss": 0.3478,
+       "step": 340
+     },
+     {
+       "epoch": 0.88,
+       "grad_norm": 2.395519733428955,
+       "learning_rate": 3.923705722070845e-05,
+       "loss": 0.2199,
+       "step": 360
+     },
+     {
+       "epoch": 0.93,
+       "grad_norm": 8.089118003845215,
+       "learning_rate": 3.832879200726612e-05,
+       "loss": 0.2715,
+       "step": 380
+     },
+     {
+       "epoch": 0.98,
+       "grad_norm": 8.150867462158203,
+       "learning_rate": 3.74205267938238e-05,
+       "loss": 0.2074,
+       "step": 400
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9387254901960784,
+       "eval_auc": 0.9766835240883684,
+       "eval_f1": 0.957841483979764,
+       "eval_loss": 0.2350389063358307,
+       "eval_precision": 0.961082910321489,
+       "eval_recall": 0.9546218487394958,
+       "eval_runtime": 246.6561,
+       "eval_samples_per_second": 3.308,
+       "eval_steps_per_second": 0.207,
+       "step": 408
+     },
+     {
+       "epoch": 1.03,
+       "grad_norm": 6.791078090667725,
+       "learning_rate": 3.651226158038147e-05,
+       "loss": 0.1235,
+       "step": 420
+     },
+     {
+       "epoch": 1.08,
+       "grad_norm": 5.592333793640137,
+       "learning_rate": 3.560399636693915e-05,
+       "loss": 0.4199,
+       "step": 440
+     },
+     {
+       "epoch": 1.13,
+       "grad_norm": 0.2713923752307892,
+       "learning_rate": 3.469573115349682e-05,
+       "loss": 0.3119,
+       "step": 460
+     },
+     {
+       "epoch": 1.18,
+       "grad_norm": 5.907072067260742,
+       "learning_rate": 3.37874659400545e-05,
+       "loss": 0.2118,
+       "step": 480
+     },
+     {
+       "epoch": 1.23,
+       "grad_norm": 0.9097113013267517,
+       "learning_rate": 3.287920072661217e-05,
+       "loss": 0.2174,
+       "step": 500
+     },
+     {
+       "epoch": 1.27,
+       "grad_norm": 6.9212141036987305,
+       "learning_rate": 3.197093551316985e-05,
+       "loss": 0.2448,
+       "step": 520
+     },
+     {
+       "epoch": 1.32,
+       "grad_norm": 6.113616466522217,
+       "learning_rate": 3.106267029972752e-05,
+       "loss": 0.1619,
+       "step": 540
+     },
+     {
+       "epoch": 1.37,
+       "grad_norm": 0.9741531014442444,
+       "learning_rate": 3.0154405086285197e-05,
+       "loss": 0.3296,
+       "step": 560
+     },
+     {
+       "epoch": 1.42,
+       "grad_norm": 1.604313611984253,
+       "learning_rate": 2.924613987284287e-05,
+       "loss": 0.1598,
+       "step": 580
+     },
+     {
+       "epoch": 1.47,
+       "grad_norm": 5.160298824310303,
+       "learning_rate": 2.8337874659400547e-05,
+       "loss": 0.2605,
+       "step": 600
+     },
+     {
+       "epoch": 1.52,
+       "grad_norm": 7.961933135986328,
+       "learning_rate": 2.7429609445958222e-05,
+       "loss": 0.295,
+       "step": 620
+     },
+     {
+       "epoch": 1.57,
+       "grad_norm": 3.545825719833374,
+       "learning_rate": 2.6521344232515894e-05,
+       "loss": 0.2613,
+       "step": 640
+     },
+     {
+       "epoch": 1.62,
+       "grad_norm": 0.7656643390655518,
+       "learning_rate": 2.5613079019073572e-05,
+       "loss": 0.1684,
+       "step": 660
+     },
+     {
+       "epoch": 1.67,
+       "grad_norm": 14.269344329833984,
+       "learning_rate": 2.4704813805631247e-05,
+       "loss": 0.3285,
+       "step": 680
+     },
+     {
+       "epoch": 1.72,
+       "grad_norm": 0.21142134070396423,
+       "learning_rate": 2.379654859218892e-05,
+       "loss": 0.2071,
+       "step": 700
+     },
+     {
+       "epoch": 1.76,
+       "grad_norm": 1.0282666683197021,
+       "learning_rate": 2.2888283378746594e-05,
+       "loss": 0.2701,
+       "step": 720
+     },
+     {
+       "epoch": 1.81,
+       "grad_norm": 12.365777969360352,
+       "learning_rate": 2.198001816530427e-05,
+       "loss": 0.1753,
+       "step": 740
+     },
+     {
+       "epoch": 1.86,
+       "grad_norm": 6.909509181976318,
+       "learning_rate": 2.1071752951861944e-05,
+       "loss": 0.185,
+       "step": 760
+     },
+     {
+       "epoch": 1.91,
+       "grad_norm": 10.059576034545898,
+       "learning_rate": 2.016348773841962e-05,
+       "loss": 0.1403,
+       "step": 780
+     },
+     {
+       "epoch": 1.96,
+       "grad_norm": 13.194554328918457,
+       "learning_rate": 1.9255222524977297e-05,
+       "loss": 0.177,
+       "step": 800
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9522058823529411,
+       "eval_auc": 0.9864329442184113,
+       "eval_f1": 0.967418546365915,
+       "eval_loss": 0.15405645966529846,
+       "eval_precision": 0.9617940199335548,
+       "eval_recall": 0.973109243697479,
+       "eval_runtime": 257.0506,
+       "eval_samples_per_second": 3.174,
+       "eval_steps_per_second": 0.198,
+       "step": 816
+     },
+     {
+       "epoch": 2.01,
+       "grad_norm": 0.45505988597869873,
+       "learning_rate": 1.834695731153497e-05,
+       "loss": 0.1334,
+       "step": 820
+     },
+     {
+       "epoch": 2.06,
+       "grad_norm": 0.5608593821525574,
+       "learning_rate": 1.7438692098092644e-05,
+       "loss": 0.1801,
+       "step": 840
+     },
+     {
+       "epoch": 2.11,
+       "grad_norm": 1.9215396642684937,
+       "learning_rate": 1.653042688465032e-05,
+       "loss": 0.1397,
+       "step": 860
+     },
+     {
+       "epoch": 2.16,
+       "grad_norm": 0.03459596261382103,
+       "learning_rate": 1.5622161671207994e-05,
+       "loss": 0.0797,
+       "step": 880
+     },
+     {
+       "epoch": 2.21,
+       "grad_norm": 4.931589603424072,
+       "learning_rate": 1.4713896457765669e-05,
+       "loss": 0.1547,
+       "step": 900
+     },
+     {
+       "epoch": 2.25,
+       "grad_norm": 12.403867721557617,
+       "learning_rate": 1.3805631244323344e-05,
+       "loss": 0.1008,
+       "step": 920
+     },
+     {
+       "epoch": 2.3,
+       "grad_norm": 6.834578514099121,
+       "learning_rate": 1.2897366030881017e-05,
+       "loss": 0.3086,
+       "step": 940
+     },
+     {
+       "epoch": 2.35,
+       "grad_norm": 0.12356822937726974,
+       "learning_rate": 1.1989100817438692e-05,
+       "loss": 0.1367,
+       "step": 960
+     },
+     {
+       "epoch": 2.4,
+       "grad_norm": 0.23836758732795715,
+       "learning_rate": 1.1080835603996367e-05,
+       "loss": 0.1204,
+       "step": 980
+     },
+     {
+       "epoch": 2.45,
+       "grad_norm": 0.645460307598114,
+       "learning_rate": 1.0172570390554042e-05,
+       "loss": 0.2857,
+       "step": 1000
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 6.155028820037842,
+       "learning_rate": 9.264305177111717e-06,
+       "loss": 0.1514,
+       "step": 1020
+     },
+     {
+       "epoch": 2.55,
+       "grad_norm": 6.625197410583496,
+       "learning_rate": 8.356039963669392e-06,
+       "loss": 0.1973,
+       "step": 1040
+     },
+     {
+       "epoch": 2.6,
+       "grad_norm": 0.4476400911808014,
+       "learning_rate": 7.447774750227067e-06,
+       "loss": 0.1153,
+       "step": 1060
+     },
+     {
+       "epoch": 2.65,
+       "grad_norm": 11.432110786437988,
+       "learning_rate": 6.539509536784741e-06,
+       "loss": 0.1943,
+       "step": 1080
+     },
+     {
+       "epoch": 2.7,
+       "grad_norm": 6.038093090057373,
+       "learning_rate": 5.631244323342416e-06,
+       "loss": 0.0998,
+       "step": 1100
+     },
+     {
+       "epoch": 2.75,
+       "grad_norm": 0.24591827392578125,
+       "learning_rate": 4.722979109900091e-06,
+       "loss": 0.1767,
+       "step": 1120
+     },
+     {
+       "epoch": 2.79,
+       "grad_norm": 3.9476640224456787,
+       "learning_rate": 3.814713896457766e-06,
+       "loss": 0.1798,
+       "step": 1140
+     },
+     {
+       "epoch": 2.84,
+       "grad_norm": 9.382974624633789,
+       "learning_rate": 2.9064486830154405e-06,
+       "loss": 0.1707,
+       "step": 1160
+     },
+     {
+       "epoch": 2.89,
+       "grad_norm": 0.10719335079193115,
+       "learning_rate": 1.9981834695731155e-06,
+       "loss": 0.2662,
+       "step": 1180
+     },
+     {
+       "epoch": 2.94,
+       "grad_norm": 10.07032299041748,
+       "learning_rate": 1.0899182561307902e-06,
+       "loss": 0.218,
+       "step": 1200
+     },
+     {
+       "epoch": 2.99,
+       "grad_norm": 17.199472427368164,
+       "learning_rate": 1.8165304268846503e-07,
+       "loss": 0.1692,
+       "step": 1220
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9644607843137255,
+       "eval_auc": 0.9916270580630442,
+       "eval_f1": 0.9755686604886269,
+       "eval_loss": 0.1180819422006607,
+       "eval_precision": 0.9780405405405406,
+       "eval_recall": 0.973109243697479,
+       "eval_runtime": 252.4161,
+       "eval_samples_per_second": 3.233,
+       "eval_steps_per_second": 0.202,
+       "step": 1224
+     }
+   ],
+   "logging_steps": 20,
+   "max_steps": 1224,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 7.581041343995535e+17,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
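The `learning_rate` column in this log follows linear warmup then linear decay, consistent with `training_params.json` below (`lr` 5e-05, `warmup_ratio` 0.1, `scheduler` "linear", 1224 total steps). The sketch below reproduces the logged values; the 123-step warmup length is not stated anywhere and is inferred from the log itself, since 5e-05 × 20/123 exactly matches the value at step 20:

```python
# Sketch: reproduce the learning_rate values logged in trainer_state.json from a
# linear warmup + linear decay schedule. WARMUP = 123 is inferred from the log.
import math

LR, MAX_STEPS = 5e-05, 1224
WARMUP = math.ceil(0.1 * MAX_STEPS)  # 123

def lr_at(step: int) -> float:
    if step < WARMUP:
        return LR * step / WARMUP
    return LR * (MAX_STEPS - step) / (MAX_STEPS - WARMUP)

print(lr_at(20))    # 8.130081300813009e-06, as logged at step 20
print(lr_at(140))   # 4.922797456857402e-05, as logged at step 140
print(lr_at(1220))  # 1.8165304268846503e-07, as logged at step 1220
```

As a cross-check on the other numbers: 1224 steps over 3 epochs is 408 steps per epoch, which at `train_batch_size` 8 implies roughly 3,264 training images, and `eval_runtime` × `eval_samples_per_second` (246.66 × 3.308) works out to about 816 validation images per pass.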
checkpoint-1224/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fcde71267faef3288902b885e4aa4cfb36a7b0ebfeccfd0e1505e2ba3c2e468
+ size 4920
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "_num_labels": 2,
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NORMAL",
+     "1": "PNEUMONIA"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NORMAL": 0,
+     "PNEUMONIA": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.0"
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80584abcc67d75cd6b1bb645f5ea036d18ddcdef533578238eb8283f6cfc5f15
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
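This config describes the standard ViT preprocessing chain: resize to 224×224 (`resample` 2 is PIL's bilinear filter), rescale by 1/255 (0.00392156862745098), then normalize with per-channel mean and std of 0.5, which maps pixel values into [-1, 1]. A hand-rolled sketch of the same arithmetic, with a hypothetical `xray.jpg` input; in practice `AutoImageProcessor.from_pretrained` applies these steps for you:

```python
# Sketch of the preprocessing this config describes, done by hand to show the
# arithmetic. "xray.jpg" is a hypothetical input file.
import numpy as np
from PIL import Image

img = Image.open("xray.jpg").convert("RGB").resize((224, 224), Image.BILINEAR)
x = np.asarray(img, dtype=np.float32) * 0.00392156862745098  # rescale: [0, 255] -> [0, 1]
x = (x - 0.5) / 0.5                                          # normalize: [0, 1] -> [-1, 1]
x = x.transpose(2, 0, 1)[None]                               # HWC -> NCHW, shape (1, 3, 224, 224)
```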
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fcde71267faef3288902b885e4aa4cfb36a7b0ebfeccfd0e1505e2ba3c2e468
+ size 4920
training_params.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "data_path": "CXR-Classifier/autotrain-data",
+   "model": "google/vit-base-patch16-224",
+   "username": "NeuronZero",
+   "lr": 5e-05,
+   "epochs": 3,
+   "batch_size": 8,
+   "warmup_ratio": 0.1,
+   "gradient_accumulation": 1,
+   "optimizer": "adamw_torch",
+   "scheduler": "linear",
+   "weight_decay": 0.0,
+   "max_grad_norm": 1.0,
+   "seed": 42,
+   "train_split": "train",
+   "valid_split": "validation",
+   "logging_steps": -1,
+   "project_name": "CXR-Classifier",
+   "auto_find_batch_size": false,
+   "mixed_precision": "fp8",
+   "save_total_limit": 1,
+   "save_strategy": "epoch",
+   "push_to_hub": true,
+   "repo_id": "NeuronZero/CXR-Classifier",
+   "evaluation_strategy": "epoch",
+   "image_column": "autotrain_image",
+   "target_column": "autotrain_label",
+   "log": "none"
+ }
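These are AutoTrain's job parameters rather than raw `transformers` arguments. As a rough, non-authoritative translation, the sketch below maps them onto `TrainingArguments`; AutoTrain's internal wiring is not public, the eval batch size of 16 is only inferred from the throughput numbers in `trainer_state.json` (3.308 samples/s ÷ 0.207 steps/s ≈ 16), and the `"fp8"` mixed-precision setting is omitted because it has no direct `TrainingArguments` flag:

```python
# Hedged sketch: an approximate TrainingArguments equivalent of the AutoTrain
# parameters above. Illustrative only, not AutoTrain's actual internal mapping.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="CXR-Classifier",
    learning_rate=5e-05,
    num_train_epochs=3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=16,   # inferred from eval throughput, not stated in the params
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,
    optim="adamw_torch",
    lr_scheduler_type="linear",
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    logging_steps=20,                # trainer_state.json shows logging every 20 steps
    evaluation_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=1,
    push_to_hub=True,
    hub_model_id="NeuronZero/CXR-Classifier",
)
```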