tmnam20 committed on
Commit
38e02dd
1 Parent(s): 27f97ad

Training in progress, step 22000, checkpoint

checkpoint-22000/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
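For reference, the quantization settings listed in this README correspond to a `transformers` `BitsAndBytesConfig` along the lines of the sketch below. This is an illustrative reconstruction from the config values, not the original training script.

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch of a bitsandbytes config matching checkpoint-22000/README.md.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                    # load_in_8bit stays False
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_threshold=6.0,
    llm_int8_has_fp16_weight=False,
)
```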
checkpoint-22000/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "codellama/CodeLlama-7b-Instruct-hf",
+   "bias": "lora_only",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
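The adapter configuration above maps onto a `peft.LoraConfig` roughly as follows; treat this as a sketch reconstructed from the JSON rather than the code that produced it.

```python
from peft import LoraConfig, TaskType

# Sketch of a LoRA config equivalent to checkpoint-22000/adapter_config.json.
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,
    lora_alpha=8,
    lora_dropout=0.1,
    bias="lora_only",                     # train bias terms only on LoRA layers
    target_modules=["q_proj", "v_proj"],  # attention query/value projections
    fan_in_fan_out=False,
    init_lora_weights=True,
)
```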
checkpoint-22000/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57ac9a638012d9a59c281659d1ba638178b3647ffbee9f9f2bd845604f15ea8b
+ size 16822989
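`adapter_model.bin` holds only the LoRA weights (about 16 MB), so the base model has to be loaded separately and the adapter attached on top. A minimal loading sketch, assuming the base model and 4-bit settings from the README; paths and variable names here are illustrative.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base_id = "codellama/CodeLlama-7b-Instruct-hf"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id,
    quantization_config=bnb_config,
    device_map="auto",
)

# Attach the LoRA adapter stored in this checkpoint directory.
model = PeftModel.from_pretrained(base_model, "checkpoint-22000")
model.eval()
```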
checkpoint-22000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06230eaea1f9dae455c0d5f14858ee6835caeebdb23365ddaa62c0e71a68a8cf
+ size 8555781
checkpoint-22000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b43766722ff911f35604fa71f01420c9e7a3b0e1e8cfd62bed055299e9dd782
+ size 14575
checkpoint-22000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dbd237b1e1d36dbcec04afa2f1cd75aa7edf4cf97dadf15d33cb45648441f0b
+ size 627
checkpoint-22000/trainer_state.json ADDED
@@ -0,0 +1,245 @@
+ {
+   "best_metric": 0.017868032678961754,
+   "best_model_checkpoint": "./text2sql/codellama_instruct_pt_text2sql/checkpoint-22000",
+   "epoch": 2.450741488546961,
+   "eval_steps": 2000,
+   "global_step": 22000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 1.1139992573338283e-07,
+       "loss": 1.2191,
+       "step": 1
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 0.00011139992573338284,
+       "loss": 0.2684,
+       "step": 1000
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 0.00022279985146676567,
+       "loss": 0.0693,
+       "step": 2000
+     },
+     {
+       "epoch": 0.22,
+       "eval_loss": 0.05887996032834053,
+       "eval_runtime": 171.127,
+       "eval_samples_per_second": 11.687,
+       "eval_steps_per_second": 1.461,
+       "step": 2000
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00029781686301467276,
+       "loss": 0.0565,
+       "step": 3000
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 0.0002907056676227274,
+       "loss": 0.047,
+       "step": 4000
+     },
+     {
+       "epoch": 0.45,
+       "eval_loss": 0.03957173973321915,
+       "eval_runtime": 171.3682,
+       "eval_samples_per_second": 11.671,
+       "eval_steps_per_second": 1.459,
+       "step": 4000
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 0.00028359447223078195,
+       "loss": 0.0403,
+       "step": 5000
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 0.0002764832768388366,
+       "loss": 0.0364,
+       "step": 6000
+     },
+     {
+       "epoch": 0.67,
+       "eval_loss": 0.030724667012691498,
+       "eval_runtime": 170.9405,
+       "eval_samples_per_second": 11.7,
+       "eval_steps_per_second": 1.462,
+       "step": 6000
+     },
+     {
+       "epoch": 0.78,
+       "learning_rate": 0.0002693720814468912,
+       "loss": 0.0341,
+       "step": 7000
+     },
+     {
+       "epoch": 0.89,
+       "learning_rate": 0.00026226088605494583,
+       "loss": 0.0311,
+       "step": 8000
+     },
+     {
+       "epoch": 0.89,
+       "eval_loss": 0.027806701138615608,
+       "eval_runtime": 171.064,
+       "eval_samples_per_second": 11.692,
+       "eval_steps_per_second": 1.461,
+       "step": 8000
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.00025514969066300046,
+       "loss": 0.0289,
+       "step": 9000
+     },
+     {
+       "epoch": 1.11,
+       "learning_rate": 0.00024803849527105503,
+       "loss": 0.0251,
+       "step": 10000
+     },
+     {
+       "epoch": 1.11,
+       "eval_loss": 0.024082325398921967,
+       "eval_runtime": 171.4689,
+       "eval_samples_per_second": 11.664,
+       "eval_steps_per_second": 1.458,
+       "step": 10000
+     },
+     {
+       "epoch": 1.23,
+       "learning_rate": 0.00024092729987910966,
+       "loss": 0.0242,
+       "step": 11000
+     },
+     {
+       "epoch": 1.34,
+       "learning_rate": 0.00023381610448716428,
+       "loss": 0.0243,
+       "step": 12000
+     },
+     {
+       "epoch": 1.34,
+       "eval_loss": 0.02279273048043251,
+       "eval_runtime": 171.2147,
+       "eval_samples_per_second": 11.681,
+       "eval_steps_per_second": 1.46,
+       "step": 12000
+     },
+     {
+       "epoch": 1.45,
+       "learning_rate": 0.00022670490909521888,
+       "loss": 0.0219,
+       "step": 13000
+     },
+     {
+       "epoch": 1.56,
+       "learning_rate": 0.0002195937137032735,
+       "loss": 0.0227,
+       "step": 14000
+     },
+     {
+       "epoch": 1.56,
+       "eval_loss": 0.022252563387155533,
+       "eval_runtime": 171.1834,
+       "eval_samples_per_second": 11.683,
+       "eval_steps_per_second": 1.46,
+       "step": 14000
+     },
+     {
+       "epoch": 1.67,
+       "learning_rate": 0.00021248251831132813,
+       "loss": 0.0218,
+       "step": 15000
+     },
+     {
+       "epoch": 1.78,
+       "learning_rate": 0.00020537132291938273,
+       "loss": 0.0212,
+       "step": 16000
+     },
+     {
+       "epoch": 1.78,
+       "eval_loss": 0.020056582987308502,
+       "eval_runtime": 170.9191,
+       "eval_samples_per_second": 11.701,
+       "eval_steps_per_second": 1.463,
+       "step": 16000
+     },
+     {
+       "epoch": 1.89,
+       "learning_rate": 0.00019826012752743733,
+       "loss": 0.0208,
+       "step": 17000
+     },
+     {
+       "epoch": 2.01,
+       "learning_rate": 0.00019114893213549196,
+       "loss": 0.0202,
+       "step": 18000
+     },
+     {
+       "epoch": 2.01,
+       "eval_loss": 0.01821763999760151,
+       "eval_runtime": 171.4736,
+       "eval_samples_per_second": 11.664,
+       "eval_steps_per_second": 1.458,
+       "step": 18000
+     },
+     {
+       "epoch": 2.12,
+       "learning_rate": 0.00018403773674354658,
+       "loss": 0.0161,
+       "step": 19000
+     },
+     {
+       "epoch": 2.23,
+       "learning_rate": 0.0001769265413516012,
+       "loss": 0.016,
+       "step": 20000
+     },
+     {
+       "epoch": 2.23,
+       "eval_loss": 0.018352985382080078,
+       "eval_runtime": 171.2111,
+       "eval_samples_per_second": 11.681,
+       "eval_steps_per_second": 1.46,
+       "step": 20000
+     },
+     {
+       "epoch": 2.34,
+       "learning_rate": 0.00016981534595965578,
+       "loss": 0.0155,
+       "step": 21000
+     },
+     {
+       "epoch": 2.45,
+       "learning_rate": 0.0001627041505677104,
+       "loss": 0.0156,
+       "step": 22000
+     },
+     {
+       "epoch": 2.45,
+       "eval_loss": 0.017868032678961754,
+       "eval_runtime": 171.2047,
+       "eval_samples_per_second": 11.682,
+       "eval_steps_per_second": 1.46,
+       "step": 22000
+     }
+   ],
+   "logging_steps": 1000,
+   "max_steps": 44880,
+   "num_train_epochs": 5,
+   "save_steps": 2000,
+   "total_flos": 1.0708801647854813e+19,
+   "trial_name": null,
+   "trial_params": null
+ }
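`trainer_state.json` is plain JSON, so the evaluation history and the best checkpoint recorded above can be pulled out with the standard library alone. A small sketch, assuming the file is read from the checkpoint directory as named in this commit:

```python
import json

# Load the Trainer state saved with this checkpoint.
with open("checkpoint-22000/trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss: ", state["best_metric"])

# Evaluations are logged every eval_steps (2000) steps alongside the training loss.
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals:
    print(f"step {step:>6}: eval_loss {loss:.4f}")
```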
checkpoint-22000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c3233085912f12f47eb0b59979d9c767ddd5d0fc4cd96a7ebdcd9b12e9ba2c6
+ size 4219