Model save

Files changed:
- README.md +3 -3
- all_results.json +4 -9
- train_results.json +4 -4
- trainer_state.json +46 -46
README.md
CHANGED
@@ -20,7 +20,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss:
+- Loss: 1.7210
 
 ## Model description
 
@@ -57,12 +57,12 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-
+| 1.7241 | 0.9945 | 91 | 1.7210 |
 
 
 ### Framework versions
 
-- PEFT 0.13.
+- PEFT 0.13.2
 - Transformers 4.46.2
 - Pytorch 2.3.1+cu121
 - Datasets 3.1.0
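Given the framework versions recorded in the card (PEFT 0.13.2 on top of meta-llama/Meta-Llama-3-8B), the saved weights are presumably a PEFT adapter rather than a full model. Below is a minimal loading sketch under that assumption; the adapter repo id is a hypothetical placeholder, not the actual repository name.

```python
# Minimal PEFT-adapter loading sketch. Assumptions: the repository holds a PEFT
# adapter for meta-llama/Meta-Llama-3-8B, and "your-org/llama3-8b-generator-adapter"
# is a hypothetical placeholder for its id.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Meta-Llama-3-8B"
adapter_id = "your-org/llama3-8b-generator-adapter"  # placeholder, not the real repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base_model, adapter_id)  # attach the adapter weights

prompt = "The generator dataset contains"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```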
all_results.json
CHANGED
@@ -1,14 +1,9 @@
 {
     "epoch": 0.994535519125683,
-    "eval_loss": 14.016096115112305,
-    "eval_runtime": 2.0922,
-    "eval_samples": 518,
-    "eval_samples_per_second": 83.167,
-    "eval_steps_per_second": 0.956,
     "total_flos": 4.03002565094015e+17,
-    "train_loss":
-    "train_runtime":
+    "train_loss": 1.799644404715234,
+    "train_runtime": 332.7348,
     "train_samples": 51241,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 52.685,
+    "train_steps_per_second": 0.273
 }
train_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.994535519125683,
     "total_flos": 4.03002565094015e+17,
-    "train_loss":
-    "train_runtime":
+    "train_loss": 1.799644404715234,
+    "train_runtime": 332.7348,
     "train_samples": 51241,
-    "train_samples_per_second":
-    "train_steps_per_second": 0.
+    "train_samples_per_second": 52.685,
+    "train_steps_per_second": 0.273
 }
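As a quick sanity check, the throughput numbers in train_results.json are mutually consistent with the 91 optimizer steps recorded in trainer_state.json below. A small sketch of that arithmetic, using only values copied from the files in this commit:

```python
# Sanity-check the reported throughput (values copied from train_results.json;
# the 91-step count comes from trainer_state.json).
train_runtime = 332.7348           # seconds
train_steps = 91                   # final "step" in trainer_state.json
train_samples_per_second = 52.685  # as reported

steps_per_second = train_steps / train_runtime
print(f"steps/s: {steps_per_second:.3f}")  # ~0.273, matches the reported value

# Implied effective (global) batch size, assuming samples/s and steps/s are
# measured over the same wall-clock window: roughly 192-193.
effective_batch = train_samples_per_second / steps_per_second
print(f"implied global batch size: {effective_batch:.0f}")
```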
trainer_state.json
CHANGED
@@ -10,153 +10,153 @@
   "log_history": [
     {
       "epoch": 0.01092896174863388,
-      "grad_norm":
+      "grad_norm": 67.9917221069336,
       "learning_rate": 2e-05,
-      "loss":
+      "loss": 2.2916,
       "step": 1
     },
     {
       "epoch": 0.0546448087431694,
-      "grad_norm":
+      "grad_norm": 2.097611665725708,
       "learning_rate": 0.0001,
-      "loss":
+      "loss": 2.2127,
       "step": 5
     },
     {
       "epoch": 0.1092896174863388,
-      "grad_norm": 1.
+      "grad_norm": 1.9431463479995728,
       "learning_rate": 0.0002,
-      "loss":
+      "loss": 2.0887,
       "step": 10
     },
     {
       "epoch": 0.16393442622950818,
-      "grad_norm":
+      "grad_norm": 1.8884824514389038,
       "learning_rate": 0.00019812553106273847,
-      "loss":
+      "loss": 1.9685,
       "step": 15
     },
     {
       "epoch": 0.2185792349726776,
-      "grad_norm":
+      "grad_norm": 2.330018997192383,
       "learning_rate": 0.00019257239692688907,
-      "loss":
+      "loss": 1.8856,
       "step": 20
     },
     {
       "epoch": 0.273224043715847,
-      "grad_norm":
+      "grad_norm": 0.6340972781181335,
       "learning_rate": 0.00018354878114129367,
-      "loss":
+      "loss": 1.8482,
       "step": 25
     },
     {
       "epoch": 0.32786885245901637,
-      "grad_norm":
+      "grad_norm": 0.45715248584747314,
       "learning_rate": 0.00017139297345578994,
-      "loss":
+      "loss": 1.7705,
       "step": 30
     },
     {
       "epoch": 0.3825136612021858,
-      "grad_norm":
+      "grad_norm": 1.0445672273635864,
       "learning_rate": 0.00015656068754865387,
-      "loss":
+      "loss": 1.7627,
       "step": 35
     },
     {
       "epoch": 0.4371584699453552,
-      "grad_norm":
+      "grad_norm": 0.503304123878479,
       "learning_rate": 0.0001396079766039157,
-      "loss":
+      "loss": 1.7488,
       "step": 40
     },
     {
       "epoch": 0.4918032786885246,
-      "grad_norm":
+      "grad_norm": 0.5089389681816101,
       "learning_rate": 0.0001211703872229411,
-      "loss":
+      "loss": 1.7165,
       "step": 45
     },
     {
       "epoch": 0.546448087431694,
-      "grad_norm":
+      "grad_norm": 0.416858971118927,
       "learning_rate": 0.00010193913317718244,
-      "loss":
+      "loss": 1.7396,
       "step": 50
     },
     {
       "epoch": 0.6010928961748634,
-      "grad_norm":
+      "grad_norm": 0.29764389991760254,
       "learning_rate": 8.263518223330697e-05,
-      "loss":
+      "loss": 1.7045,
       "step": 55
     },
     {
       "epoch": 0.6557377049180327,
-      "grad_norm":
+      "grad_norm": 0.3781444728374481,
       "learning_rate": 6.398222751952899e-05,
-      "loss":
+      "loss": 1.6977,
       "step": 60
     },
     {
       "epoch": 0.7103825136612022,
-      "grad_norm":
+      "grad_norm": 0.3005172610282898,
       "learning_rate": 4.66795567198309e-05,
-      "loss":
+      "loss": 1.715,
       "step": 65
     },
     {
       "epoch": 0.7650273224043715,
-      "grad_norm":
+      "grad_norm": 0.39595624804496765,
       "learning_rate": 3.137583621312665e-05,
-      "loss":
+      "loss": 1.7143,
       "step": 70
     },
     {
       "epoch": 0.819672131147541,
-      "grad_norm":
+      "grad_norm": 0.4393070638179779,
       "learning_rate": 1.864479297370325e-05,
-      "loss":
+      "loss": 1.6919,
       "step": 75
     },
     {
       "epoch": 0.8743169398907104,
-      "grad_norm":
+      "grad_norm": 0.38544324040412903,
       "learning_rate": 8.963705903385345e-06,
-      "loss":
+      "loss": 1.7241,
       "step": 80
     },
     {
       "epoch": 0.9289617486338798,
-      "grad_norm":
+      "grad_norm": 0.5594367980957031,
       "learning_rate": 2.6955129420176196e-06,
-      "loss":
+      "loss": 1.6885,
       "step": 85
     },
     {
       "epoch": 0.9836065573770492,
-      "grad_norm":
+      "grad_norm": 0.2916119396686554,
       "learning_rate": 7.520474957699586e-08,
-      "loss":
+      "loss": 1.7241,
       "step": 90
     },
     {
       "epoch": 0.994535519125683,
-      "eval_loss":
-      "eval_runtime":
-      "eval_samples_per_second":
-      "eval_steps_per_second":
+      "eval_loss": 1.7210174798965454,
+      "eval_runtime": 1.2379,
+      "eval_samples_per_second": 140.557,
+      "eval_steps_per_second": 1.616,
       "step": 91
     },
     {
       "epoch": 0.994535519125683,
       "step": 91,
       "total_flos": 4.03002565094015e+17,
-      "train_loss":
-      "train_runtime":
-      "train_samples_per_second":
-      "train_steps_per_second": 0.
+      "train_loss": 1.799644404715234,
+      "train_runtime": 332.7348,
+      "train_samples_per_second": 52.685,
+      "train_steps_per_second": 0.273
     }
   ],
   "logging_steps": 5,
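Since log_history is plain JSON, the loss curve recorded above can be extracted with the standard library alone. A minimal sketch, assuming a local copy of trainer_state.json in the working directory:

```python
# Read the training loss curve out of trainer_state.json (standard library only).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:          # per-step training logs (every 5 steps here)
        print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}")
    elif "eval_loss" in entry:   # the single evaluation logged at step 91
        print(f"step {entry['step']:>3}  eval_loss {entry['eval_loss']:.4f}")
```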