yaniseuranova committed on
Commit 6303895
1 Parent(s): 106f0b7

🍻 cheers

README.md CHANGED
@@ -2,7 +2,11 @@
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
+- image-classification
+- vision
 - generated_from_trainer
+metrics:
+- accuracy
 model-index:
 - name: vit-base-beans
   results: []
@@ -13,7 +17,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # vit-base-beans
 
-This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.0172
+- Accuracy: 1.0
 
 ## Model description
 
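For context, a minimal inference sketch for a checkpoint like this, using the 🤗 Transformers image-classification pipeline. The repository id `yaniseuranova/vit-base-beans` is an assumption inferred from the commit author and model name; it is not stated in this diff.

```python
# Minimal sketch: run inference with the fine-tuned ViT image classifier.
# Assumption: the checkpoint is published as "yaniseuranova/vit-base-beans"
# (hypothetical repo id, inferred from this commit's author and model name).
from transformers import pipeline

classifier = pipeline(
    task="image-classification",
    model="yaniseuranova/vit-base-beans",  # hypothetical repo id
)

# Any local path or PIL image of a bean leaf works here.
predictions = classifier("path/to/leaf_image.jpg")
print(predictions)  # list of {"label": ..., "score": ...} dicts, best first
```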
all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 4.0,
     "eval_accuracy": 1.0,
-    "eval_loss": 0.024416429921984673,
-    "eval_runtime": 2.0478,
-    "eval_samples_per_second": 28.324,
-    "eval_steps_per_second": 3.907,
-    "total_flos": 1.7978141590585344e+16,
-    "train_loss": 0.3237314224243164,
-    "train_runtime": 22.8416,
-    "train_samples_per_second": 10.157,
-    "train_steps_per_second": 2.627
+    "eval_loss": 0.01724555343389511,
+    "eval_runtime": 2.9509,
+    "eval_samples_per_second": 29.822,
+    "eval_steps_per_second": 3.728,
+    "total_flos": 2.7277180344336384e+16,
+    "train_loss": 0.29457727345553314,
+    "train_runtime": 29.6451,
+    "train_samples_per_second": 11.874,
+    "train_steps_per_second": 2.968
 }
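The updated throughput figures are internally consistent with the new 88-step run. A quick sanity check, assuming the usual Trainer semantics (runtime in seconds, rates per second, and the `train_batch_size` of 4 recorded in trainer_state.json):

```python
# Sanity check on the updated metrics (values copied from all_results.json).
# Assumes standard Trainer reporting: runtime in seconds, rates per second.
train_runtime = 29.6451
train_steps_per_second = 2.968
train_samples_per_second = 11.874

total_steps = train_runtime * train_steps_per_second      # ~88, matches global_step
total_samples = train_runtime * train_samples_per_second  # ~352 = 88 steps * batch size 4
print(round(total_steps), round(total_samples))
```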
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
     "eval_accuracy": 1.0,
-    "eval_loss": 0.024416429921984673,
-    "eval_runtime": 2.0478,
-    "eval_samples_per_second": 28.324,
-    "eval_steps_per_second": 3.907
+    "eval_loss": 0.01724555343389511,
+    "eval_runtime": 2.9509,
+    "eval_samples_per_second": 29.822,
+    "eval_steps_per_second": 3.728
 }
runs/Aug30_08-56-26_7e2de4304170/events.out.tfevents.1725008239.7e2de4304170 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:baa5827c224a251d2762b3ba503aef52f2838e578619c1a16d3f754a6a24d66c
+size 40
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 4.0,
-    "total_flos": 1.7978141590585344e+16,
-    "train_loss": 0.3237314224243164,
-    "train_runtime": 22.8416,
-    "train_samples_per_second": 10.157,
-    "train_steps_per_second": 2.627
+    "total_flos": 2.7277180344336384e+16,
+    "train_loss": 0.29457727345553314,
+    "train_runtime": 29.6451,
+    "train_samples_per_second": 11.874,
+    "train_steps_per_second": 2.968
 }
trainer_state.json CHANGED
@@ -3,65 +3,79 @@
   "best_model_checkpoint": null,
   "epoch": 4.0,
   "eval_steps": 100,
-  "global_step": 60,
+  "global_step": 88,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.6666666666666666,
-      "grad_norm": 2.1028881072998047,
-      "learning_rate": 0.0001666666666666667,
-      "loss": 0.609,
+      "epoch": 0.45454545454545453,
+      "grad_norm": 2.2645957469940186,
+      "learning_rate": 0.00017727272727272728,
+      "loss": 0.5609,
       "step": 10
     },
     {
-      "epoch": 1.3333333333333333,
-      "grad_norm": 5.82706880569458,
-      "learning_rate": 0.00013333333333333334,
-      "loss": 0.6172,
+      "epoch": 0.9090909090909091,
+      "grad_norm": 3.18221116065979,
+      "learning_rate": 0.00015454545454545454,
+      "loss": 0.8267,
       "step": 20
     },
     {
-      "epoch": 2.0,
-      "grad_norm": 10.371822357177734,
-      "learning_rate": 0.0001,
-      "loss": 0.4026,
+      "epoch": 1.3636363636363638,
+      "grad_norm": 3.1673595905303955,
+      "learning_rate": 0.0001318181818181818,
+      "loss": 0.5182,
       "step": 30
     },
     {
-      "epoch": 2.6666666666666665,
-      "grad_norm": 0.941929817199707,
-      "learning_rate": 6.666666666666667e-05,
-      "loss": 0.171,
+      "epoch": 1.8181818181818183,
+      "grad_norm": 0.5910093784332275,
+      "learning_rate": 0.00010909090909090909,
+      "loss": 0.2435,
       "step": 40
     },
     {
-      "epoch": 3.3333333333333335,
-      "grad_norm": 0.1865934133529663,
-      "learning_rate": 3.3333333333333335e-05,
-      "loss": 0.114,
+      "epoch": 2.2727272727272725,
+      "grad_norm": 3.6887409687042236,
+      "learning_rate": 8.636363636363637e-05,
+      "loss": 0.2545,
       "step": 50
     },
     {
-      "epoch": 4.0,
-      "grad_norm": 0.20858462154865265,
-      "learning_rate": 0.0,
-      "loss": 0.0287,
+      "epoch": 2.7272727272727275,
+      "grad_norm": 0.7244749665260315,
+      "learning_rate": 6.363636363636364e-05,
+      "loss": 0.1302,
       "step": 60
     },
+    {
+      "epoch": 3.1818181818181817,
+      "grad_norm": 0.1487322300672531,
+      "learning_rate": 4.0909090909090915e-05,
+      "loss": 0.0252,
+      "step": 70
+    },
+    {
+      "epoch": 3.6363636363636362,
+      "grad_norm": 0.1414584219455719,
+      "learning_rate": 1.8181818181818182e-05,
+      "loss": 0.0192,
+      "step": 80
+    },
     {
       "epoch": 4.0,
-      "step": 60,
-      "total_flos": 1.7978141590585344e+16,
-      "train_loss": 0.3237314224243164,
-      "train_runtime": 22.8416,
-      "train_samples_per_second": 10.157,
-      "train_steps_per_second": 2.627
+      "step": 88,
+      "total_flos": 2.7277180344336384e+16,
+      "train_loss": 0.29457727345553314,
+      "train_runtime": 29.6451,
+      "train_samples_per_second": 11.874,
+      "train_steps_per_second": 2.968
     }
   ],
   "logging_steps": 10,
-  "max_steps": 60,
+  "max_steps": 88,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 4,
   "save_steps": 100,
@@ -77,7 +91,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.7978141590585344e+16,
+  "total_flos": 2.7277180344336384e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null