Upload 9 files
Browse files
- README.md +54 -1
- adapter_config.json +23 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- special_tokens_map.json +17 -0
- tokenizer.json +0 -0
- tokenizer_config.json +7 -0
- trainer_state.json +256 -0
- training_args.bin +3 -0
README.md
CHANGED
@@ -1,3 +1,56 @@
 ---
-
+library_name: peft
 ---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float16
+
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float16
+### Framework versions
+
+- PEFT 0.5.0.dev0
+- PEFT 0.5.0.dev0
+- PEFT 0.5.0.dev0
+
+- PEFT 0.5.0.dev0
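The README lists the `bitsandbytes` 4-bit quantization settings used during training. As a rough illustration only, here is a minimal sketch of the corresponding `transformers` setup, reconstructed from the listed values (not taken from the original training script); it assumes `transformers` and `bitsandbytes` are installed.

```python
# Minimal sketch: the quantization config described in the README,
# reconstructed from the listed values (not the original training script).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_4bit: True
    bnb_4bit_quant_type="nf4",             # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=False,       # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float16,  # bnb_4bit_compute_dtype: float16
)

# Base model name taken from adapter_config.json below.
model = AutoModelForCausalLM.from_pretrained(
    "ybelkada/falcon-7b-sharded-bf16",
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
```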
adapter_config.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "ybelkada/falcon-7b-sharded-bf16",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 16,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "revision": null,
+  "target_modules": [
+    "query_key_value",
+    "dense",
+    "dense_h_to_4h",
+    "dense_4h_to_h"
+  ],
+  "task_type": "CAUSAL_LM"
+}
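adapter_config.json is the PEFT LoRA adapter config: rank 64, alpha 16, dropout 0.1, targeting all four Falcon linear blocks. A hedged sketch of the equivalent `peft.LoraConfig`, with values read off the JSON above; it assumes `peft` is installed and `model` is the 4-bit base model from the previous sketch.

```python
# Sketch: a peft LoraConfig matching adapter_config.json
# (values read from the JSON; not the original training script).
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model = prepare_model_for_kbit_training(model)  # `model` from the previous sketch

lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"],
)

peft_model = get_peft_model(model, lora_config)
peft_model.print_trainable_parameters()  # only the LoRA matrices are trainable
```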
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8aa695d6ff41defe8f0a0969a0a2c86a78e07f3dd9ef7782bf6f6746a712392
+size 14575
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13276f15dd2b6acc19b970176aa2db4ac9b58241843e72c89b50e3094e903b19
+size 627
special_tokens_map.json
ADDED
@@ -0,0 +1,17 @@
+{
+  "additional_special_tokens": [
+    ">>TITLE<<",
+    ">>ABSTRACT<<",
+    ">>INTRODUCTION<<",
+    ">>SUMMARY<<",
+    ">>COMMENT<<",
+    ">>ANSWER<<",
+    ">>QUESTION<<",
+    ">>DOMAIN<<",
+    ">>PREFIX<<",
+    ">>SUFFIX<<",
+    ">>MIDDLE<<"
+  ],
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>"
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "add_prefix_space": false,
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 2048,
+  "tokenizer_class": "PreTrainedTokenizerFast"
+}
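special_tokens_map.json and tokenizer_config.json describe the tokenizer shipped with the adapter: a `PreTrainedTokenizerFast` with a 2048-token context, Falcon's `>>…<<` domain tokens, and `<|endoftext|>` reused as both EOS and padding token. A small usage sketch, assuming the files from this commit sit in a local directory (the path below is hypothetical):

```python
# Sketch: loading the tokenizer files added in this commit.
# "./falcon-7b-qlora-adapter" is a hypothetical local path containing them;
# the Hub repo id would work the same way.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./falcon-7b-qlora-adapter")

print(tokenizer.eos_token)         # <|endoftext|> (also used as the pad token)
print(tokenizer.model_max_length)  # 2048
```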
trainer_state.json
ADDED
@@ -0,0 +1,256 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 4.63768115942029,
+  "global_step": 400,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.12,
+      "learning_rate": 0.0002,
+      "loss": 2.8625,
+      "step": 10
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 2.8987,
+      "step": 20
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 0.0002,
+      "loss": 2.7552,
+      "step": 30
+    },
+    {
+      "epoch": 0.46,
+      "learning_rate": 0.0002,
+      "loss": 2.8627,
+      "step": 40
+    },
+    {
+      "epoch": 0.58,
+      "learning_rate": 0.0002,
+      "loss": 2.6602,
+      "step": 50
+    },
+    {
+      "epoch": 0.7,
+      "learning_rate": 0.0002,
+      "loss": 2.8089,
+      "step": 60
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 0.0002,
+      "loss": 2.7102,
+      "step": 70
+    },
+    {
+      "epoch": 0.93,
+      "learning_rate": 0.0002,
+      "loss": 2.772,
+      "step": 80
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 0.0002,
+      "loss": 2.6953,
+      "step": 90
+    },
+    {
+      "epoch": 1.16,
+      "learning_rate": 0.0002,
+      "loss": 2.6401,
+      "step": 100
+    },
+    {
+      "epoch": 1.28,
+      "learning_rate": 0.0002,
+      "loss": 2.648,
+      "step": 110
+    },
+    {
+      "epoch": 1.39,
+      "learning_rate": 0.0002,
+      "loss": 2.5756,
+      "step": 120
+    },
+    {
+      "epoch": 1.51,
+      "learning_rate": 0.0002,
+      "loss": 2.6002,
+      "step": 130
+    },
+    {
+      "epoch": 1.62,
+      "learning_rate": 0.0002,
+      "loss": 2.5801,
+      "step": 140
+    },
+    {
+      "epoch": 1.74,
+      "learning_rate": 0.0002,
+      "loss": 2.6678,
+      "step": 150
+    },
+    {
+      "epoch": 1.86,
+      "learning_rate": 0.0002,
+      "loss": 2.5758,
+      "step": 160
+    },
+    {
+      "epoch": 1.97,
+      "learning_rate": 0.0002,
+      "loss": 2.6861,
+      "step": 170
+    },
+    {
+      "epoch": 2.09,
+      "learning_rate": 0.0002,
+      "loss": 2.4169,
+      "step": 180
+    },
+    {
+      "epoch": 2.2,
+      "learning_rate": 0.0002,
+      "loss": 2.3657,
+      "step": 190
+    },
+    {
+      "epoch": 2.32,
+      "learning_rate": 0.0002,
+      "loss": 2.3153,
+      "step": 200
+    },
+    {
+      "epoch": 2.43,
+      "learning_rate": 0.0002,
+      "loss": 2.3601,
+      "step": 210
+    },
+    {
+      "epoch": 2.55,
+      "learning_rate": 0.0002,
+      "loss": 2.3498,
+      "step": 220
+    },
+    {
+      "epoch": 2.67,
+      "learning_rate": 0.0002,
+      "loss": 2.4114,
+      "step": 230
+    },
+    {
+      "epoch": 2.78,
+      "learning_rate": 0.0002,
+      "loss": 2.3708,
+      "step": 240
+    },
+    {
+      "epoch": 2.9,
+      "learning_rate": 0.0002,
+      "loss": 2.4176,
+      "step": 250
+    },
+    {
+      "epoch": 3.01,
+      "learning_rate": 0.0002,
+      "loss": 2.3728,
+      "step": 260
+    },
+    {
+      "epoch": 3.13,
+      "learning_rate": 0.0002,
+      "loss": 2.1058,
+      "step": 270
+    },
+    {
+      "epoch": 3.25,
+      "learning_rate": 0.0002,
+      "loss": 2.0375,
+      "step": 280
+    },
+    {
+      "epoch": 3.36,
+      "learning_rate": 0.0002,
+      "loss": 2.0828,
+      "step": 290
+    },
+    {
+      "epoch": 3.48,
+      "learning_rate": 0.0002,
+      "loss": 2.081,
+      "step": 300
+    },
+    {
+      "epoch": 3.59,
+      "learning_rate": 0.0002,
+      "loss": 2.1004,
+      "step": 310
+    },
+    {
+      "epoch": 3.71,
+      "learning_rate": 0.0002,
+      "loss": 2.1472,
+      "step": 320
+    },
+    {
+      "epoch": 3.83,
+      "learning_rate": 0.0002,
+      "loss": 2.1437,
+      "step": 330
+    },
+    {
+      "epoch": 3.94,
+      "learning_rate": 0.0002,
+      "loss": 2.149,
+      "step": 340
+    },
+    {
+      "epoch": 4.06,
+      "learning_rate": 0.0002,
+      "loss": 1.9758,
+      "step": 350
+    },
+    {
+      "epoch": 4.17,
+      "learning_rate": 0.0002,
+      "loss": 1.791,
+      "step": 360
+    },
+    {
+      "epoch": 4.29,
+      "learning_rate": 0.0002,
+      "loss": 1.7366,
+      "step": 370
+    },
+    {
+      "epoch": 4.41,
+      "learning_rate": 0.0002,
+      "loss": 1.8066,
+      "step": 380
+    },
+    {
+      "epoch": 4.52,
+      "learning_rate": 0.0002,
+      "loss": 1.7821,
+      "step": 390
+    },
+    {
+      "epoch": 4.64,
+      "learning_rate": 0.0002,
+      "loss": 1.8661,
+      "step": 400
+    }
+  ],
+  "max_steps": 500,
+  "num_train_epochs": 6,
+  "total_flos": 1.897304296714445e+16,
+  "trial_name": null,
+  "trial_params": null
+}
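trainer_state.json is the Trainer's running log: loss every 10 steps at a constant 2e-4 learning rate, ending at step 400 of a planned 500 (epoch ~4.64 of 6), with loss falling from about 2.86 to about 1.87. A short sketch for pulling that loss curve out of the file, assuming it has been downloaded locally:

```python
# Sketch: reading the loss curve out of trainer_state.json (local path assumed).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        print(f'step {entry["step"]:4d}  loss {entry["loss"]:.4f}')
```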
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e32f6466c16ba713013fd98facddbea62a306618253d2be1824692787d74ffb
+size 4027
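rng_state.pth, scheduler.pt, and training_args.bin are uploaded as Git LFS pointers (the `version`/`oid`/`size` stanzas above); together with the adapter weights they form a standard `transformers` Trainer checkpoint. A hedged sketch of how such a checkpoint is typically inspected and resumed, assuming a `trainer` rebuilt with the same model, data, and arguments as the original run and a hypothetical local path:

```python
# Sketch: inspecting and resuming from the checkpoint files in this commit.
# "./checkpoint-400" is a hypothetical local directory holding them; `trainer`
# is assumed to be a transformers.Trainer built like the original run.
import torch

# training_args.bin is a pickled TrainingArguments object saved by Trainer
# (newer torch versions may require torch.load(..., weights_only=False)).
args = torch.load("./checkpoint-400/training_args.bin")
print(args.learning_rate, args.max_steps)

# rng_state.pth and scheduler.pt are picked up automatically when resuming.
trainer.train(resume_from_checkpoint="./checkpoint-400")
```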