zazazaChiang committed
Commit 4f47372
1 Parent(s): ae82c42

End of training

README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
 datasets:
 - arrow
@@ -23,7 +25,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.9774436090225563
+      value: 0.9924812030075187
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -33,8 +35,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the arrow dataset.
 It achieves the following results on the evaluation set:
-- Accuracy: 0.9774
-- Loss: 0.0842
+- Loss: 0.0667
+- Accuracy: 0.9925
 
 ## Model description
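With the new `image-classification` and `vision` tags, the checkpoint is advertised for the standard `transformers` image-classification pipeline. A minimal usage sketch follows; the repository id is a placeholder (the repo name is not shown in this commit view), and the predicted label names come from the repo's `config.json`.

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual repository this commit belongs to.
classifier = pipeline(
    "image-classification",
    model="zazazaChiang/vit-base-patch16-224-in21k-finetuned-arrow",
)

# Accepts a local path, a URL, or a PIL image.
predictions = classifier("example.jpg")
for pred in predictions:
    print(f"{pred['label']}: {pred['score']:.4f}")
```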
 
all_results.json CHANGED
@@ -2,12 +2,12 @@
     "epoch": 5.0,
     "eval_accuracy": 0.9924812030075187,
     "eval_loss": 0.0667242482304573,
-    "eval_runtime": 11.3968,
-    "eval_samples_per_second": 11.67,
-    "eval_steps_per_second": 1.492,
+    "eval_runtime": 7.7344,
+    "eval_samples_per_second": 17.196,
+    "eval_steps_per_second": 2.198,
     "total_flos": 4.006371770595533e+17,
     "train_loss": 0.0,
-    "train_runtime": 0.7374,
-    "train_samples_per_second": 7010.941,
-    "train_steps_per_second": 881.453
+    "train_runtime": 0.5041,
+    "train_samples_per_second": 10256.885,
+    "train_steps_per_second": 1289.55
 }
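For orientation on the changed runtime fields: the Trainer computes `eval_samples_per_second` as the evaluation set size divided by `eval_runtime` (and `eval_steps_per_second` analogously, with batches). The quick check below back-derives the implied evaluation set size from this commit's numbers; the 133-sample figure is an inference from the logged values, not something stated in the repo.

```python
# Values from all_results.json in this commit.
eval_runtime = 7.7344            # seconds
samples_per_second = 17.196

# Implied evaluation set size: throughput * runtime.
n_eval = round(samples_per_second * eval_runtime)
print(n_eval)           # 133

# The logged accuracies are consistent with 132/133 correct (this commit)
# and 130/133 correct (the value previously shown in README.md).
print(132 / n_eval)     # ~0.9924812030075187, matches eval_accuracy
print(130 / n_eval)     # ~0.9774436090225563, matches the old README value
```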
config.json CHANGED
@@ -28,6 +28,7 @@
   "num_channels": 3,
   "num_hidden_layers": 12,
   "patch_size": 16,
+  "problem_type": "single_label_classification",
   "qkv_bias": true,
   "torch_dtype": "float32",
   "transformers_version": "4.44.2"
eval_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 5.0,
     "eval_accuracy": 0.9924812030075187,
     "eval_loss": 0.0667242482304573,
-    "eval_runtime": 11.3968,
-    "eval_samples_per_second": 11.67,
-    "eval_steps_per_second": 1.492
+    "eval_runtime": 7.7344,
+    "eval_samples_per_second": 17.196,
+    "eval_steps_per_second": 2.198
 }
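Looking back at the config.json change above: `problem_type` tells `ViTForImageClassification` which loss to use when `labels` are passed; with `"single_label_classification"` it applies a cross-entropy loss over the class logits. A minimal sketch with a randomly initialized model and two hypothetical labels:

```python
import torch
from transformers import ViTConfig, ViTForImageClassification

# Randomly initialized toy model; num_labels=2 is hypothetical -- the real
# checkpoint's label set lives in its config.json.
config = ViTConfig(num_labels=2, problem_type="single_label_classification")
model = ViTForImageClassification(config)

pixel_values = torch.randn(1, 3, 224, 224)   # one dummy 224x224 RGB image
labels = torch.tensor([1])                   # target class index

outputs = model(pixel_values=pixel_values, labels=labels)
print(outputs.loss)   # cross-entropy loss selected by problem_type
```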
train_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 5.0,
     "total_flos": 4.006371770595533e+17,
     "train_loss": 0.0,
-    "train_runtime": 0.7374,
-    "train_samples_per_second": 7010.941,
-    "train_steps_per_second": 881.453
+    "train_runtime": 0.5041,
+    "train_samples_per_second": 10256.885,
+    "train_steps_per_second": 1289.55
 }
trainer_state.json CHANGED
@@ -513,9 +513,9 @@
       "step": 650,
       "total_flos": 4.006371770595533e+17,
       "train_loss": 0.0,
-      "train_runtime": 0.7374,
-      "train_samples_per_second": 7010.941,
-      "train_steps_per_second": 881.453
+      "train_runtime": 0.5041,
+      "train_samples_per_second": 10256.885,
+      "train_steps_per_second": 1289.55
     }
   ],
   "logging_steps": 10,