second commit

- all_results.json +7 -7
- generated_predictions.jsonl +0 -0
- llamaboard_config.yaml +12 -57
- predict_results.json +9 -0
- running_log.txt +78 -534
- trainer_log.jsonl +15 -191
- training_args.yaml +9 -21
all_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
-"
-"
-"
-"
-"
-"
-"
+"predict_bleu-4": 83.34018267405064,
+"predict_rouge-1": 89.24050632911393,
+"predict_rouge-2": 0.0,
+"predict_rouge-l": 89.24050632911393,
+"predict_runtime": 10.2347,
+"predict_samples_per_second": 122.427,
+"predict_steps_per_second": 7.719
 }
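The new metrics are internally consistent with the prediction run recorded in running_log.txt below: 1,253 test examples generated in 10.2347 s gives the reported 122.427 samples per second, and with a per-device batch size of 2 across the 8 ranks in the log, 79 prediction steps give the reported 7.719 steps per second. A minimal sanity check in Python (the example count and GPU layout are taken from running_log.txt, not from this file):

```python
import math

# Figures from all_results.json / predict_results.json and running_log.txt.
num_examples = 1253    # "Num examples = 1253" in running_log.txt
runtime_s = 10.2347    # "predict_runtime"
per_device_batch = 2   # "Batch size = 2"
num_ranks = 8          # ranks 0-7 appear in running_log.txt

print(num_examples / runtime_s)  # ~122.427 -> predict_samples_per_second

steps = math.ceil(num_examples / (per_device_batch * num_ranks))  # 79
print(steps / runtime_s)         # ~7.719 -> predict_steps_per_second
```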
generated_predictions.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
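The file itself is not rendered here, but it is JSONL (one JSON record per prediction), so a short sketch can at least confirm that it holds one record per test example (1,253 according to running_log.txt); the record schema is not shown in this diff, so the field names are inspected rather than assumed:

```python
import json

# Count records in generated_predictions.jsonl and peek at their schema.
with open("generated_predictions.jsonl", encoding="utf-8") as f:
    records = [json.loads(line) for line in f if line.strip()]

print(len(records))               # expected: 1253, one per test example
print(sorted(records[0].keys()))  # inspect the actual field names
```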
llamaboard_config.yaml
CHANGED
@@ -1,5 +1,16 @@
+eval.batch_size: 2
+eval.cutoff_len: 1024
+eval.dataset:
+- truth_dev_0716_2
+eval.dataset_dir: data
+eval.max_new_tokens: 512
+eval.max_samples: '100000'
+eval.output_dir: eval_2024-07-16-17-27-37
+eval.predict: true
+eval.temperature: 0.95
+eval.top_p: 0.7
 top.booster: auto
-top.checkpoint_path:
+top.checkpoint_path: train_2024-07-16-16-48-49_llama2_2
 top.finetuning_type: full
 top.model_name: LLaMA2-7B-Chat
 top.quantization_bit: none
@@ -7,59 +18,3 @@ top.quantization_method: bitsandbytes
 top.rope_scaling: none
 top.template: llama2
 top.visual_inputs: false
-train.additional_target: ''
-train.badam_mode: layer
-train.badam_switch_interval: 50
-train.badam_switch_mode: ascending
-train.badam_update_ratio: 0.05
-train.batch_size: 2
-train.compute_type: bf16
-train.create_new_adapter: false
-train.cutoff_len: 1024
-train.dataset:
-- truth_train_0716_2
-train.dataset_dir: data
-train.ds_offload: false
-train.ds_stage: '2'
-train.freeze_extra_modules: ''
-train.freeze_trainable_layers: 2
-train.freeze_trainable_modules: all
-train.galore_rank: 16
-train.galore_scale: 0.25
-train.galore_target: all
-train.galore_update_interval: 200
-train.gradient_accumulation_steps: 8
-train.learning_rate: 5e-6
-train.logging_steps: 1
-train.lora_alpha: 16
-train.lora_dropout: 0
-train.lora_rank: 8
-train.lora_target: ''
-train.loraplus_lr_ratio: 0
-train.lr_scheduler_type: cosine
-train.max_grad_norm: '1.0'
-train.max_samples: '100000'
-train.neat_packing: false
-train.neftune_alpha: 0
-train.num_train_epochs: '5.0'
-train.optim: adamw_torch
-train.packing: false
-train.ppo_score_norm: false
-train.ppo_whiten_rewards: false
-train.pref_beta: 0.1
-train.pref_ftx: 0
-train.pref_loss: sigmoid
-train.report_to: false
-train.resize_vocab: false
-train.reward_model: null
-train.save_steps: 1000
-train.shift_attn: false
-train.training_stage: Supervised Fine-Tuning
-train.use_badam: false
-train.use_dora: false
-train.use_galore: false
-train.use_llama_pro: false
-train.use_pissa: false
-train.use_rslora: false
-train.val_size: 0
-train.warmup_steps: 10
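This commit repurposes the config from the training run to the evaluation run: the whole train.* block is dropped, an eval.* block is added (sampling with temperature 0.95 and top_p 0.7, up to 512 new tokens, predict mode on), and top.checkpoint_path now points at the finished full fine-tune. Note the keys are flat dotted strings rather than nested sections, so they read back as literal dictionary keys; a minimal sketch, assuming PyYAML:

```python
import yaml  # PyYAML

with open("llamaboard_config.yaml", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

# Keys are literal strings such as "eval.temperature", not nested mappings.
print(cfg["top.checkpoint_path"])                  # train_2024-07-16-16-48-49_llama2_2
print(cfg["eval.temperature"], cfg["eval.top_p"])  # 0.95 0.7
print([k for k in cfg if k.startswith("eval.")])   # the new eval block
```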
predict_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+"predict_bleu-4": 83.34018267405064,
+"predict_rouge-1": 89.24050632911393,
+"predict_rouge-2": 0.0,
+"predict_rouge-l": 89.24050632911393,
+"predict_runtime": 10.2347,
+"predict_samples_per_second": 122.427,
+"predict_steps_per_second": 7.719
+}
running_log.txt
CHANGED
@@ -1,63 +1,49 @@
-07
-[INFO|
-07/16/2024
-07
-[INFO|
-07/16/2024
-07/16/2024
-07/16/2024
-07/16/2024
-07/16/2024
-07/16/2024
-07/16/2024
-07
-07
-07/16/2024 16:50:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
-07/16/2024 16:50:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
-07/16/2024 16:50:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
-07/16/2024 16:50:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
-07/16/2024 16:50:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
-[INFO|configuration_utils.py:733] 2024-07-16 16:50:20,327 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-2-7b-chat-hf/snapshots/f5db02db724555f92da89c216ac04704f23d4590/config.json
-[INFO|configuration_utils.py:800] 2024-07-16 16:50:20,328 >> Model config LlamaConfig {
-"_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+[INFO|parser.py:325] 2024-07-16 17:28:19,793 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, compute dtype: None
+[INFO|tokenization_utils_base.py:2159] 2024-07-16 17:28:19,796 >> loading file tokenizer.model
+[INFO|tokenization_utils_base.py:2159] 2024-07-16 17:28:19,796 >> loading file tokenizer.json
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1, distributed training: True, compute dtype: None
+[INFO|tokenization_utils_base.py:2159] 2024-07-16 17:28:19,796 >> loading file added_tokens.json
+[INFO|tokenization_utils_base.py:2159] 2024-07-16 17:28:19,796 >> loading file special_tokens_map.json
+[INFO|tokenization_utils_base.py:2159] 2024-07-16 17:28:19,796 >> loading file tokenizer_config.json
+[INFO|loader.py:50] 2024-07-16 17:28:19,846 >> Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 7, device: cuda:7, n_gpu: 1, distributed training: True, compute dtype: None
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1, distributed training: True, compute dtype: None
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 4, device: cuda:4, n_gpu: 1, distributed training: True, compute dtype: None
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1, distributed training: True, compute dtype: None
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 5, device: cuda:5, n_gpu: 1, distributed training: True, compute dtype: None
+07/16/2024 17:28:19 - INFO - llamafactory.hparams.parser - Process rank: 6, device: cuda:6, n_gpu: 1, distributed training: True, compute dtype: None
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+07/16/2024 17:28:20 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
+[INFO|configuration_utils.py:731] 2024-07-16 17:28:22,109 >> loading configuration file saves/LLaMA2-7B-Chat/full/train_2024-07-16-16-48-49_llama2_2/config.json
+[INFO|configuration_utils.py:800] 2024-07-16 17:28:22,111 >> Model config LlamaConfig {
+"_name_or_path": "saves/LLaMA2-7B-Chat/full/train_2024-07-16-16-48-49_llama2_2",
 "architectures": [
 "LlamaForCausalLM"
 ],
@@ -80,32 +66,48 @@
 "rope_scaling": null,
 "rope_theta": 10000.0,
 "tie_word_embeddings": false,
-"torch_dtype": "
+"torch_dtype": "bfloat16",
 "transformers_version": "4.42.3",
-"use_cache":
+"use_cache": false,
 "vocab_size": 32000
 }
-[INFO|
-[INFO|modeling_utils.py:1531] 2024-07-16
-[INFO|configuration_utils.py:1000] 2024-07-16
+[INFO|patcher.py:81] 2024-07-16 17:28:22,111 >> Using KV cache for faster generation.
+[INFO|modeling_utils.py:3553] 2024-07-16 17:28:22,134 >> loading weights file saves/LLaMA2-7B-Chat/full/train_2024-07-16-16-48-49_llama2_2/model.safetensors.index.json
+[INFO|modeling_utils.py:1531] 2024-07-16 17:28:22,134 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
+[INFO|configuration_utils.py:1000] 2024-07-16 17:28:22,135 >> Generate config GenerationConfig {
 "bos_token_id": 1,
 "eos_token_id": 2
 }
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/16/2024 17:28:22 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+[INFO|modeling_utils.py:4364] 2024-07-16 17:28:25,435 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
+[INFO|modeling_utils.py:4372] 2024-07-16 17:28:25,435 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at saves/LLaMA2-7B-Chat/full/train_2024-07-16-16-48-49_llama2_2.
 If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
-[INFO|configuration_utils.py:
-[INFO|configuration_utils.py:1000] 2024-07-16
+[INFO|configuration_utils.py:953] 2024-07-16 17:28:25,439 >> loading configuration file saves/LLaMA2-7B-Chat/full/train_2024-07-16-16-48-49_llama2_2/generation_config.json
+[INFO|configuration_utils.py:1000] 2024-07-16 17:28:25,439 >> Generate config GenerationConfig {
 "bos_token_id": 1,
 "do_sample": true,
 "eos_token_id": 2,
@@ -116,518 +118,60 @@ If your task is similar to the task the model of the checkpoint was trained on,
 }
-[INFO|
-[INFO|attention.py:80] 2024-07-16 16:50:37,746 >> Using torch SDPA for faster training and inference.
-[INFO|adapter.py:302] 2024-07-16 16:50:37,746 >> Upcasting trainable params to float32.
-[INFO|adapter.py:48] 2024-07-16 16:50:37,746 >> Fine-tuning method: Full
-[INFO|loader.py:196] 2024-07-16 16:50:37,798 >> trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-07/16/2024 16:50:37 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:37 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:37 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:37 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:37 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-[INFO|trainer.py:642] 2024-07-16 16:50:37,804 >> Using auto half precision backend
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:38 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-07/16/2024 16:50:38 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-07/16/2024 16:50:38 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:38 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:38 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/16/2024 16:50:38 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/16/2024 16:50:38 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/16/2024 16:50:38 - INFO - llamafactory.model.loader - trainable params: 6,738,415,616 || all params: 6,738,415,616 || trainable%: 100.0000
-[INFO|trainer.py:2128] 2024-07-16 16:50:57,380 >> ***** Running training *****
-[INFO|trainer.py:2129] 2024-07-16 16:50:57,380 >> Num examples = 4,958
-[INFO|trainer.py:2130] 2024-07-16 16:50:57,380 >> Num Epochs = 5
-[INFO|trainer.py:2131] 2024-07-16 16:50:57,380 >> Instantaneous batch size per device = 2
-[INFO|trainer.py:2134] 2024-07-16 16:50:57,380 >> Total train batch size (w. parallel, distributed & accumulation) = 128
-[INFO|trainer.py:2135] 2024-07-16 16:50:57,380 >> Gradient Accumulation steps = 8
-[INFO|trainer.py:2136] 2024-07-16 16:50:57,380 >> Total optimization steps = 190
-[INFO|trainer.py:2137] 2024-07-16 16:50:57,381 >> Number of trainable parameters = 6,738,415,616
-[INFO|callbacks.py:310] 2024-07-16 16:51:09,938 >> {'loss': 8.2514, 'learning_rate': 5.0000e-07, 'epoch': 0.03, 'throughput': 545.43}
-[INFO|callbacks.py:310] 2024-07-16 16:51:20,951 >> {'loss': 8.2793, 'learning_rate': 1.0000e-06, 'epoch': 0.05, 'throughput': 584.51}
-[INFO|callbacks.py:310] 2024-07-16 16:51:31,943 >> {'loss': 8.1700, 'learning_rate': 1.5000e-06, 'epoch': 0.08, 'throughput': 598.15}
-[INFO|callbacks.py:310] 2024-07-16 16:51:42,923 >> {'loss': 7.6197, 'learning_rate': 2.0000e-06, 'epoch': 0.10, 'throughput': 609.21}
-[INFO|callbacks.py:310] 2024-07-16 16:51:53,920 >> {'loss': 6.9491, 'learning_rate': 2.5000e-06, 'epoch': 0.13, 'throughput': 612.41}
-[INFO|callbacks.py:310] 2024-07-16 16:52:04,919 >> {'loss': 5.2054, 'learning_rate': 3.0000e-06, 'epoch': 0.15, 'throughput': 613.36}
-[INFO|callbacks.py:310] 2024-07-16 16:52:15,920 >> {'loss': 4.8642, 'learning_rate': 3.5000e-06, 'epoch': 0.18, 'throughput': 615.05}
-[INFO|callbacks.py:310] 2024-07-16 16:52:26,924 >> {'loss': 3.2874, 'learning_rate': 4.0000e-06, 'epoch': 0.21, 'throughput': 615.94}
-[INFO|callbacks.py:310] 2024-07-16 16:52:37,962 >> {'loss': 2.6310, 'learning_rate': 4.5000e-06, 'epoch': 0.23, 'throughput': 613.25}
-[INFO|callbacks.py:310] 2024-07-16 16:52:48,988 >> {'loss': 0.6982, 'learning_rate': 5.0000e-06, 'epoch': 0.26, 'throughput': 613.59}
-[INFO|callbacks.py:310] 2024-07-16 16:53:00,018 >> {'loss': 0.3276, 'learning_rate': 4.9996e-06, 'epoch': 0.28, 'throughput': 613.98}
-[INFO|callbacks.py:310] 2024-07-16 16:53:11,029 >> {'loss': 0.2930, 'learning_rate': 4.9985e-06, 'epoch': 0.31, 'throughput': 615.72}
-[INFO|callbacks.py:310] 2024-07-16 16:53:22,020 >> {'loss': 0.2129, 'learning_rate': 4.9966e-06, 'epoch': 0.34, 'throughput': 615.94}
-[INFO|callbacks.py:310] 2024-07-16 16:53:33,021 >> {'loss': 0.4712, 'learning_rate': 4.9939e-06, 'epoch': 0.36, 'throughput': 616.20}
-[INFO|callbacks.py:310] 2024-07-16 16:53:44,003 >> {'loss': 0.2350, 'learning_rate': 4.9905e-06, 'epoch': 0.39, 'throughput': 617.55}
-[INFO|callbacks.py:310] 2024-07-16 16:53:55,026 >> {'loss': 0.2020, 'learning_rate': 4.9863e-06, 'epoch': 0.41, 'throughput': 618.05}
-[INFO|callbacks.py:310] 2024-07-16 16:54:06,023 >> {'loss': 0.1981, 'learning_rate': 4.9814e-06, 'epoch': 0.44, 'throughput': 617.73}
-[INFO|callbacks.py:310] 2024-07-16 16:54:17,028 >> {'loss': 0.1517, 'learning_rate': 4.9757e-06, 'epoch': 0.46, 'throughput': 617.10}
-[INFO|callbacks.py:310] 2024-07-16 16:54:28,037 >> {'loss': 0.4335, 'learning_rate': 4.9692e-06, 'epoch': 0.49, 'throughput': 617.28}
-[INFO|callbacks.py:310] 2024-07-16 16:54:39,050 >> {'loss': 0.3609, 'learning_rate': 4.9620e-06, 'epoch': 0.52, 'throughput': 617.29}
-[INFO|callbacks.py:310] 2024-07-16 16:54:50,034 >> {'loss': 0.1708, 'learning_rate': 4.9541e-06, 'epoch': 0.54, 'throughput': 618.54}
-[INFO|callbacks.py:310] 2024-07-16 16:55:01,020 >> {'loss': 0.2277, 'learning_rate': 4.9454e-06, 'epoch': 0.57, 'throughput': 617.70}
-[INFO|callbacks.py:310] 2024-07-16 16:55:12,039 >> {'loss': 0.3437, 'learning_rate': 4.9359e-06, 'epoch': 0.59, 'throughput': 617.93}
-[INFO|callbacks.py:310] 2024-07-16 16:55:23,067 >> {'loss': 0.2229, 'learning_rate': 4.9257e-06, 'epoch': 0.62, 'throughput': 619.02}
-[INFO|callbacks.py:310] 2024-07-16 16:55:34,096 >> {'loss': 0.1242, 'learning_rate': 4.9148e-06, 'epoch': 0.65, 'throughput': 617.82}
-[INFO|callbacks.py:310] 2024-07-16 16:55:45,128 >> {'loss': 0.2117, 'learning_rate': 4.9032e-06, 'epoch': 0.67, 'throughput': 617.94}
-[INFO|callbacks.py:310] 2024-07-16 16:55:56,152 >> {'loss': 0.2706, 'learning_rate': 4.8908e-06, 'epoch': 0.70, 'throughput': 618.70}
-[INFO|callbacks.py:310] 2024-07-16 16:56:07,175 >> {'loss': 0.2084, 'learning_rate': 4.8776e-06, 'epoch': 0.72, 'throughput': 618.27}
-[INFO|callbacks.py:310] 2024-07-16 16:56:18,165 >> {'loss': 0.0981, 'learning_rate': 4.8638e-06, 'epoch': 0.75, 'throughput': 618.39}
-[INFO|callbacks.py:310] 2024-07-16 16:56:29,154 >> {'loss': 0.1600, 'learning_rate': 4.8492e-06, 'epoch': 0.77, 'throughput': 618.50}
-[INFO|callbacks.py:310] 2024-07-16 16:56:40,149 >> {'loss': 0.1614, 'learning_rate': 4.8340e-06, 'epoch': 0.80, 'throughput': 617.80}
-[INFO|callbacks.py:310] 2024-07-16 16:56:51,163 >> {'loss': 0.1742, 'learning_rate': 4.8180e-06, 'epoch': 0.83, 'throughput': 617.47}
-[INFO|callbacks.py:310] 2024-07-16 16:57:02,179 >> {'loss': 0.1107, 'learning_rate': 4.8013e-06, 'epoch': 0.85, 'throughput': 617.90}
-[INFO|callbacks.py:310] 2024-07-16 16:57:13,192 >> {'loss': 0.0822, 'learning_rate': 4.7839e-06, 'epoch': 0.88, 'throughput': 617.42}
-[INFO|callbacks.py:310] 2024-07-16 16:57:24,203 >> {'loss': 0.1873, 'learning_rate': 4.7658e-06, 'epoch': 0.90, 'throughput': 617.01}
-[INFO|callbacks.py:310] 2024-07-16 16:57:35,243 >> {'loss': 0.2375, 'learning_rate': 4.7470e-06, 'epoch': 0.93, 'throughput': 616.94}
-[INFO|callbacks.py:310] 2024-07-16 16:57:46,259 >> {'loss': 0.2667, 'learning_rate': 4.7275e-06, 'epoch': 0.95, 'throughput': 617.73}
-[INFO|callbacks.py:310] 2024-07-16 16:57:57,247 >> {'loss': 0.1547, 'learning_rate': 4.7074e-06, 'epoch': 0.98, 'throughput': 618.14}
-[INFO|callbacks.py:310] 2024-07-16 16:58:08,231 >> {'loss': 0.1662, 'learning_rate': 4.6865e-06, 'epoch': 1.01, 'throughput': 618.69}
-[INFO|callbacks.py:310] 2024-07-16 16:58:19,239 >> {'loss': 0.0808, 'learning_rate': 4.6651e-06, 'epoch': 1.03, 'throughput': 618.41}
-[INFO|callbacks.py:310] 2024-07-16 16:58:30,245 >> {'loss': 0.0884, 'learning_rate': 4.6429e-06, 'epoch': 1.06, 'throughput': 618.04}
-[INFO|callbacks.py:310] 2024-07-16 16:58:41,281 >> {'loss': 0.0883, 'learning_rate': 4.6201e-06, 'epoch': 1.08, 'throughput': 618.55}
-[INFO|callbacks.py:310] 2024-07-16 16:58:52,323 >> {'loss': 0.0562, 'learning_rate': 4.5967e-06, 'epoch': 1.11, 'throughput': 618.59}
-[INFO|callbacks.py:310] 2024-07-16 16:59:03,347 >> {'loss': 0.0856, 'learning_rate': 4.5726e-06, 'epoch': 1.14, 'throughput': 618.38}
-[INFO|callbacks.py:310] 2024-07-16 16:59:14,361 >> {'loss': 0.0612, 'learning_rate': 4.5479e-06, 'epoch': 1.16, 'throughput': 618.26}
-[INFO|callbacks.py:310] 2024-07-16 16:59:25,365 >> {'loss': 0.0944, 'learning_rate': 4.5225e-06, 'epoch': 1.19, 'throughput': 618.35}
-[INFO|callbacks.py:310] 2024-07-16 16:59:36,390 >> {'loss': 0.0624, 'learning_rate': 4.4966e-06, 'epoch': 1.21, 'throughput': 618.23}
-[INFO|callbacks.py:310] 2024-07-16 16:59:47,376 >> {'loss': 0.0363, 'learning_rate': 4.4700e-06, 'epoch': 1.24, 'throughput': 618.21}
-[INFO|callbacks.py:310] 2024-07-16 16:59:58,399 >> {'loss': 0.1039, 'learning_rate': 4.4429e-06, 'epoch': 1.26, 'throughput': 618.16}
-[INFO|callbacks.py:310] 2024-07-16 17:00:09,394 >> {'loss': 0.0488, 'learning_rate': 4.4151e-06, 'epoch': 1.29, 'throughput': 618.19}
-[INFO|callbacks.py:310] 2024-07-16 17:00:20,399 >> {'loss': 0.0613, 'learning_rate': 4.3868e-06, 'epoch': 1.32, 'throughput': 618.44}
-[INFO|callbacks.py:310] 2024-07-16 17:00:31,407 >> {'loss': 0.0700, 'learning_rate': 4.3579e-06, 'epoch': 1.34, 'throughput': 618.12}
-[INFO|callbacks.py:310] 2024-07-16 17:00:42,424 >> {'loss': 0.0463, 'learning_rate': 4.3284e-06, 'epoch': 1.37, 'throughput': 618.08}
-[INFO|callbacks.py:310] 2024-07-16 17:00:53,462 >> {'loss': 0.0671, 'learning_rate': 4.2983e-06, 'epoch': 1.39, 'throughput': 618.15}
-[INFO|callbacks.py:310] 2024-07-16 17:01:04,468 >> {'loss': 0.0428, 'learning_rate': 4.2678e-06, 'epoch': 1.42, 'throughput': 618.43}
-[INFO|callbacks.py:310] 2024-07-16 17:01:15,463 >> {'loss': 0.0678, 'learning_rate': 4.2366e-06, 'epoch': 1.45, 'throughput': 618.43}
-[INFO|callbacks.py:310] 2024-07-16 17:01:26,456 >> {'loss': 0.0476, 'learning_rate': 4.2050e-06, 'epoch': 1.47, 'throughput': 618.38}
-[INFO|callbacks.py:310] 2024-07-16 17:01:37,444 >> {'loss': 0.0442, 'learning_rate': 4.1728e-06, 'epoch': 1.50, 'throughput': 618.82}
-[INFO|callbacks.py:310] 2024-07-16 17:01:48,428 >> {'loss': 0.0336, 'learning_rate': 4.1401e-06, 'epoch': 1.52, 'throughput': 619.09}
-[INFO|callbacks.py:310] 2024-07-16 17:01:59,445 >> {'loss': 0.0460, 'learning_rate': 4.1070e-06, 'epoch': 1.55, 'throughput': 618.77}
-[INFO|callbacks.py:310] 2024-07-16 17:02:10,459 >> {'loss': 0.0416, 'learning_rate': 4.0733e-06, 'epoch': 1.57, 'throughput': 618.46}
-[INFO|callbacks.py:310] 2024-07-16 17:02:21,470 >> {'loss': 0.0649, 'learning_rate': 4.0392e-06, 'epoch': 1.60, 'throughput': 618.87}
-[INFO|callbacks.py:310] 2024-07-16 17:02:32,483 >> {'loss': 0.0591, 'learning_rate': 4.0045e-06, 'epoch': 1.63, 'throughput': 619.05}
-[INFO|callbacks.py:310] 2024-07-16 17:02:43,490 >> {'loss': 0.0318, 'learning_rate': 3.9695e-06, 'epoch': 1.65, 'throughput': 618.83}
-[INFO|callbacks.py:310] 2024-07-16 17:02:54,478 >> {'loss': 0.0462, 'learning_rate': 3.9339e-06, 'epoch': 1.68, 'throughput': 618.87}
-[INFO|callbacks.py:310] 2024-07-16 17:03:05,466 >> {'loss': 0.0465, 'learning_rate': 3.8980e-06, 'epoch': 1.70, 'throughput': 618.98}
-[INFO|callbacks.py:310] 2024-07-16 17:03:16,480 >> {'loss': 0.0316, 'learning_rate': 3.8616e-06, 'epoch': 1.73, 'throughput': 619.15}
-[INFO|callbacks.py:310] 2024-07-16 17:03:27,480 >> {'loss': 0.1000, 'learning_rate': 3.8248e-06, 'epoch': 1.75, 'throughput': 619.38}
-[INFO|callbacks.py:310] 2024-07-16 17:03:38,513 >> {'loss': 0.0711, 'learning_rate': 3.7876e-06, 'epoch': 1.78, 'throughput': 619.25}
-[INFO|callbacks.py:310] 2024-07-16 17:03:49,506 >> {'loss': 0.0494, 'learning_rate': 3.7500e-06, 'epoch': 1.81, 'throughput': 619.69}
-[INFO|callbacks.py:310] 2024-07-16 17:04:00,519 >> {'loss': 0.0618, 'learning_rate': 3.7120e-06, 'epoch': 1.83, 'throughput': 619.64}
-[INFO|callbacks.py:310] 2024-07-16 17:04:11,528 >> {'loss': 0.0511, 'learning_rate': 3.6737e-06, 'epoch': 1.86, 'throughput': 619.61}
-[INFO|callbacks.py:310] 2024-07-16 17:04:22,525 >> {'loss': 0.0464, 'learning_rate': 3.6350e-06, 'epoch': 1.88, 'throughput': 619.64}
-[INFO|callbacks.py:310] 2024-07-16 17:04:33,509 >> {'loss': 0.0331, 'learning_rate': 3.5959e-06, 'epoch': 1.91, 'throughput': 620.02}
-[INFO|callbacks.py:310] 2024-07-16 17:04:44,504 >> {'loss': 0.0706, 'learning_rate': 3.5565e-06, 'epoch': 1.94, 'throughput': 620.04}
-[INFO|callbacks.py:310] 2024-07-16 17:04:55,490 >> {'loss': 0.0442, 'learning_rate': 3.5168e-06, 'epoch': 1.96, 'throughput': 620.14}
-[INFO|callbacks.py:310] 2024-07-16 17:05:06,484 >> {'loss': 0.0420, 'learning_rate': 3.4768e-06, 'epoch': 1.99, 'throughput': 619.89}
-[INFO|callbacks.py:310] 2024-07-16 17:05:17,496 >> {'loss': 0.0210, 'learning_rate': 3.4365e-06, 'epoch': 2.01, 'throughput': 619.84}
-[INFO|callbacks.py:310] 2024-07-16 17:05:28,503 >> {'loss': 0.0094, 'learning_rate': 3.3959e-06, 'epoch': 2.04, 'throughput': 619.91}
-[INFO|callbacks.py:310] 2024-07-16 17:05:39,520 >> {'loss': 0.0021, 'learning_rate': 3.3551e-06, 'epoch': 2.06, 'throughput': 620.26}
-[INFO|callbacks.py:310] 2024-07-16 17:05:50,557 >> {'loss': 0.0146, 'learning_rate': 3.3139e-06, 'epoch': 2.09, 'throughput': 620.13}
-[INFO|callbacks.py:310] 2024-07-16 17:06:01,557 >> {'loss': 0.0237, 'learning_rate': 3.2725e-06, 'epoch': 2.12, 'throughput': 620.48}
-[INFO|callbacks.py:310] 2024-07-16 17:06:12,563 >> {'loss': 0.0031, 'learning_rate': 3.2309e-06, 'epoch': 2.14, 'throughput': 620.57}
-[INFO|callbacks.py:310] 2024-07-16 17:06:23,576 >> {'loss': 0.0034, 'learning_rate': 3.1891e-06, 'epoch': 2.17, 'throughput': 620.54}
-[INFO|callbacks.py:310] 2024-07-16 17:06:34,571 >> {'loss': 0.0045, 'learning_rate': 3.1470e-06, 'epoch': 2.19, 'throughput': 620.66}
-[INFO|callbacks.py:310] 2024-07-16 17:06:45,568 >> {'loss': 0.0031, 'learning_rate': 3.1048e-06, 'epoch': 2.22, 'throughput': 620.62}
-[INFO|callbacks.py:310] 2024-07-16 17:06:56,589 >> {'loss': 0.0341, 'learning_rate': 3.0624e-06, 'epoch': 2.25, 'throughput': 620.63}
-[INFO|callbacks.py:310] 2024-07-16 17:07:07,621 >> {'loss': 0.0095, 'learning_rate': 3.0198e-06, 'epoch': 2.27, 'throughput': 620.55}
-[INFO|callbacks.py:310] 2024-07-16 17:07:18,643 >> {'loss': 0.0459, 'learning_rate': 2.9770e-06, 'epoch': 2.30, 'throughput': 620.80}
-[INFO|callbacks.py:310] 2024-07-16 17:07:29,659 >> {'loss': 0.0104, 'learning_rate': 2.9341e-06, 'epoch': 2.32, 'throughput': 620.80}
-[INFO|callbacks.py:310] 2024-07-16 17:07:40,662 >> {'loss': 0.0201, 'learning_rate': 2.8911e-06, 'epoch': 2.35, 'throughput': 620.51}
-[INFO|callbacks.py:310] 2024-07-16 17:07:51,636 >> {'loss': 0.0021, 'learning_rate': 2.8479e-06, 'epoch': 2.37, 'throughput': 620.75}
-[INFO|callbacks.py:310] 2024-07-16 17:08:02,651 >> {'loss': 0.0430, 'learning_rate': 2.8047e-06, 'epoch': 2.40, 'throughput': 620.64}
-[INFO|callbacks.py:310] 2024-07-16 17:08:13,671 >> {'loss': 0.0207, 'learning_rate': 2.7613e-06, 'epoch': 2.43, 'throughput': 620.61}
-[INFO|callbacks.py:310] 2024-07-16 17:08:24,677 >> {'loss': 0.0148, 'learning_rate': 2.7179e-06, 'epoch': 2.45, 'throughput': 620.69}
-[INFO|callbacks.py:310] 2024-07-16 17:08:35,700 >> {'loss': 0.0040, 'learning_rate': 2.6744e-06, 'epoch': 2.48, 'throughput': 620.55}
-[INFO|callbacks.py:310] 2024-07-16 17:08:46,703 >> {'loss': 0.0131, 'learning_rate': 2.6308e-06, 'epoch': 2.50, 'throughput': 620.54}
-[INFO|callbacks.py:310] 2024-07-16 17:08:57,742 >> {'loss': 0.0455, 'learning_rate': 2.5872e-06, 'epoch': 2.53, 'throughput': 620.37}
-[INFO|callbacks.py:310] 2024-07-16 17:09:08,772 >> {'loss': 0.0031, 'learning_rate': 2.5436e-06, 'epoch': 2.55, 'throughput': 620.27}
-[INFO|callbacks.py:310] 2024-07-16 17:09:19,760 >> {'loss': 0.0099, 'learning_rate': 2.5000e-06, 'epoch': 2.58, 'throughput': 620.49}
-[INFO|callbacks.py:310] 2024-07-16 17:09:30,748 >> {'loss': 0.0797, 'learning_rate': 2.4564e-06, 'epoch': 2.61, 'throughput': 620.49}
-[INFO|callbacks.py:310] 2024-07-16 17:09:41,738 >> {'loss': 0.0059, 'learning_rate': 2.4128e-06, 'epoch': 2.63, 'throughput': 620.63}
-[INFO|callbacks.py:310] 2024-07-16 17:09:52,737 >> {'loss': 0.0438, 'learning_rate': 2.3692e-06, 'epoch': 2.66, 'throughput': 620.38}
-[INFO|callbacks.py:310] 2024-07-16 17:10:03,734 >> {'loss': 0.0149, 'learning_rate': 2.3256e-06, 'epoch': 2.68, 'throughput': 620.63}
-[INFO|callbacks.py:310] 2024-07-16 17:10:14,743 >> {'loss': 0.0126, 'learning_rate': 2.2821e-06, 'epoch': 2.71, 'throughput': 620.57}
-[INFO|callbacks.py:310] 2024-07-16 17:10:25,754 >> {'loss': 0.0255, 'learning_rate': 2.2387e-06, 'epoch': 2.74, 'throughput': 620.46}
-[INFO|callbacks.py:310] 2024-07-16 17:10:36,760 >> {'loss': 0.0048, 'learning_rate': 2.1953e-06, 'epoch': 2.76, 'throughput': 620.34}
-[INFO|callbacks.py:310] 2024-07-16 17:10:47,758 >> {'loss': 0.0142, 'learning_rate': 2.1521e-06, 'epoch': 2.79, 'throughput': 620.20}
-[INFO|callbacks.py:310] 2024-07-16 17:10:58,726 >> {'loss': 0.0193, 'learning_rate': 2.1089e-06, 'epoch': 2.81, 'throughput': 620.23}
-[INFO|callbacks.py:310] 2024-07-16 17:11:09,695 >> {'loss': 0.0055, 'learning_rate': 2.0659e-06, 'epoch': 2.84, 'throughput': 620.32}
-[INFO|callbacks.py:310] 2024-07-16 17:11:20,678 >> {'loss': 0.0144, 'learning_rate': 2.0230e-06, 'epoch': 2.86, 'throughput': 620.23}
-[INFO|callbacks.py:310] 2024-07-16 17:11:31,656 >> {'loss': 0.0272, 'learning_rate': 1.9802e-06, 'epoch': 2.89, 'throughput': 620.22}
-[INFO|callbacks.py:310] 2024-07-16 17:11:42,652 >> {'loss': 0.0101, 'learning_rate': 1.9376e-06, 'epoch': 2.92, 'throughput': 620.18}
-[INFO|callbacks.py:310] 2024-07-16 17:11:53,647 >> {'loss': 0.0109, 'learning_rate': 1.8952e-06, 'epoch': 2.94, 'throughput': 620.51}
-[INFO|callbacks.py:310] 2024-07-16 17:12:04,642 >> {'loss': 0.0180, 'learning_rate': 1.8530e-06, 'epoch': 2.97, 'throughput': 620.55}
-[INFO|callbacks.py:310] 2024-07-16 17:12:15,639 >> {'loss': 0.0141, 'learning_rate': 1.8109e-06, 'epoch': 2.99, 'throughput': 620.38}
-[INFO|callbacks.py:310] 2024-07-16 17:12:26,637 >> {'loss': 0.0057, 'learning_rate': 1.7691e-06, 'epoch': 3.02, 'throughput': 620.36}
-[INFO|callbacks.py:310] 2024-07-16 17:12:37,628 >> {'loss': 0.0063, 'learning_rate': 1.7275e-06, 'epoch': 3.05, 'throughput': 620.33}
-[INFO|callbacks.py:310] 2024-07-16 17:12:48,606 >> {'loss': 0.0138, 'learning_rate': 1.6861e-06, 'epoch': 3.07, 'throughput': 620.27}
-[INFO|callbacks.py:310] 2024-07-16 17:12:59,593 >> {'loss': 0.0011, 'learning_rate': 1.6449e-06, 'epoch': 3.10, 'throughput': 619.97}
-[INFO|callbacks.py:310] 2024-07-16 17:13:10,574 >> {'loss': 0.0006, 'learning_rate': 1.6041e-06, 'epoch': 3.12, 'throughput': 619.94}
-[INFO|callbacks.py:310] 2024-07-16 17:13:21,574 >> {'loss': 0.0055, 'learning_rate': 1.5635e-06, 'epoch': 3.15, 'throughput': 619.97}
-[INFO|callbacks.py:310] 2024-07-16 17:13:32,565 >> {'loss': 0.0011, 'learning_rate': 1.5232e-06, 'epoch': 3.17, 'throughput': 619.90}
-[INFO|callbacks.py:310] 2024-07-16 17:13:43,569 >> {'loss': 0.0173, 'learning_rate': 1.4832e-06, 'epoch': 3.20, 'throughput': 620.05}
-[INFO|callbacks.py:310] 2024-07-16 17:13:54,579 >> {'loss': 0.0027, 'learning_rate': 1.4435e-06, 'epoch': 3.23, 'throughput': 619.89}
-[INFO|callbacks.py:310] 2024-07-16 17:14:05,559 >> {'loss': 0.0029, 'learning_rate': 1.4041e-06, 'epoch': 3.25, 'throughput': 619.81}
-[INFO|callbacks.py:310] 2024-07-16 17:14:16,541 >> {'loss': 0.0003, 'learning_rate': 1.3650e-06, 'epoch': 3.28, 'throughput': 619.97}
-[INFO|callbacks.py:310] 2024-07-16 17:14:27,516 >> {'loss': 0.0007, 'learning_rate': 1.3263e-06, 'epoch': 3.30, 'throughput': 619.98}
-[INFO|callbacks.py:310] 2024-07-16 17:14:38,496 >> {'loss': 0.0080, 'learning_rate': 1.2880e-06, 'epoch': 3.33, 'throughput': 620.05}
-[INFO|callbacks.py:310] 2024-07-16 17:14:49,489 >> {'loss': 0.0004, 'learning_rate': 1.2500e-06, 'epoch': 3.35, 'throughput': 620.16}
-[INFO|callbacks.py:310] 2024-07-16 17:15:00,489 >> {'loss': 0.0049, 'learning_rate': 1.2124e-06, 'epoch': 3.38, 'throughput': 620.39}
-[INFO|callbacks.py:310] 2024-07-16 17:15:11,487 >> {'loss': 0.0012, 'learning_rate': 1.1752e-06, 'epoch': 3.41, 'throughput': 620.30}
-[INFO|callbacks.py:310] 2024-07-16 17:15:22,486 >> {'loss': 0.0044, 'learning_rate': 1.1384e-06, 'epoch': 3.43, 'throughput': 620.50}
-[INFO|callbacks.py:310] 2024-07-16 17:15:33,486 >> {'loss': 0.0017, 'learning_rate': 1.1020e-06, 'epoch': 3.46, 'throughput': 620.57}
-[INFO|callbacks.py:310] 2024-07-16 17:15:44,480 >> {'loss': 0.0003, 'learning_rate': 1.0661e-06, 'epoch': 3.48, 'throughput': 620.49}
-[INFO|callbacks.py:310] 2024-07-16 17:15:55,449 >> {'loss': 0.0099, 'learning_rate': 1.0305e-06, 'epoch': 3.51, 'throughput': 620.45}
-[INFO|callbacks.py:310] 2024-07-16 17:16:06,411 >> {'loss': 0.0068, 'learning_rate': 9.9546e-07, 'epoch': 3.54, 'throughput': 620.34}
-[INFO|callbacks.py:310] 2024-07-16 17:16:17,397 >> {'loss': 0.0025, 'learning_rate': 9.6085e-07, 'epoch': 3.56, 'throughput': 620.33}
-[INFO|callbacks.py:310] 2024-07-16 17:16:28,378 >> {'loss': 0.0004, 'learning_rate': 9.2670e-07, 'epoch': 3.59, 'throughput': 620.50}
-[INFO|callbacks.py:310] 2024-07-16 17:16:39,370 >> {'loss': 0.0101, 'learning_rate': 8.9303e-07, 'epoch': 3.61, 'throughput': 620.37}
-[INFO|callbacks.py:310] 2024-07-16 17:16:50,370 >> {'loss': 0.0068, 'learning_rate': 8.5985e-07, 'epoch': 3.64, 'throughput': 620.41}
-[INFO|callbacks.py:310] 2024-07-16 17:17:01,381 >> {'loss': 0.0007, 'learning_rate': 8.2717e-07, 'epoch': 3.66, 'throughput': 620.29}
-[INFO|callbacks.py:310] 2024-07-16 17:17:12,379 >> {'loss': 0.0161, 'learning_rate': 7.9500e-07, 'epoch': 3.69, 'throughput': 620.19}
-[INFO|callbacks.py:310] 2024-07-16 17:17:23,362 >> {'loss': 0.0115, 'learning_rate': 7.6335e-07, 'epoch': 3.72, 'throughput': 620.44}
-[INFO|callbacks.py:310] 2024-07-16 17:17:34,347 >> {'loss': 0.0052, 'learning_rate': 7.3223e-07, 'epoch': 3.74, 'throughput': 620.49}
-[INFO|callbacks.py:310] 2024-07-16 17:17:45,329 >> {'loss': 0.0098, 'learning_rate': 7.0165e-07, 'epoch': 3.77, 'throughput': 620.56}
-[INFO|callbacks.py:310] 2024-07-16 17:17:56,308 >> {'loss': 0.0005, 'learning_rate': 6.7162e-07, 'epoch': 3.79, 'throughput': 620.74}
-[INFO|callbacks.py:310] 2024-07-16 17:18:07,295 >> {'loss': 0.0012, 'learning_rate': 6.4214e-07, 'epoch': 3.82, 'throughput': 620.71}
-[INFO|callbacks.py:310] 2024-07-16 17:18:18,292 >> {'loss': 0.0013, 'learning_rate': 6.1323e-07, 'epoch': 3.85, 'throughput': 620.61}
-[INFO|callbacks.py:310] 2024-07-16 17:18:29,277 >> {'loss': 0.0003, 'learning_rate': 5.8489e-07, 'epoch': 3.87, 'throughput': 620.76}
-[INFO|callbacks.py:310] 2024-07-16 17:18:40,277 >> {'loss': 0.0026, 'learning_rate': 5.5714e-07, 'epoch': 3.90, 'throughput': 620.61}
-[INFO|callbacks.py:310] 2024-07-16 17:18:51,272 >> {'loss': 0.0097, 'learning_rate': 5.2997e-07, 'epoch': 3.92, 'throughput': 620.67}
-[INFO|callbacks.py:310] 2024-07-16 17:19:02,251 >> {'loss': 0.0047, 'learning_rate': 5.0341e-07, 'epoch': 3.95, 'throughput': 620.62}
-[INFO|callbacks.py:310] 2024-07-16 17:19:13,213 >> {'loss': 0.0081, 'learning_rate': 4.7746e-07, 'epoch': 3.97, 'throughput': 620.72}
-[INFO|callbacks.py:310] 2024-07-16 17:19:24,189 >> {'loss': 0.0018, 'learning_rate': 4.5212e-07, 'epoch': 4.00, 'throughput': 620.95}
-[INFO|callbacks.py:310] 2024-07-16 17:19:35,181 >> {'loss': 0.0053, 'learning_rate': 4.2741e-07, 'epoch': 4.03, 'throughput': 620.97}
-[INFO|callbacks.py:310] 2024-07-16 17:19:46,170 >> {'loss': 0.0005, 'learning_rate': 4.0332e-07, 'epoch': 4.05, 'throughput': 621.01}
-[INFO|callbacks.py:310] 2024-07-16 17:19:57,184 >> {'loss': 0.0001, 'learning_rate': 3.7988e-07, 'epoch': 4.08, 'throughput': 620.88}
-[INFO|callbacks.py:310] 2024-07-16 17:20:08,184 >> {'loss': 0.0018, 'learning_rate': 3.5708e-07, 'epoch': 4.10, 'throughput': 620.79}
-[INFO|callbacks.py:310] 2024-07-16 17:20:19,180 >> {'loss': 0.0010, 'learning_rate': 3.3494e-07, 'epoch': 4.13, 'throughput': 620.68}
-[INFO|callbacks.py:310] 2024-07-16 17:20:30,173 >> {'loss': 0.0001, 'learning_rate': 3.1345e-07, 'epoch': 4.15, 'throughput': 620.79}
-[INFO|callbacks.py:310] 2024-07-16 17:20:41,136 >> {'loss': 0.0012, 'learning_rate': 2.9263e-07, 'epoch': 4.18, 'throughput': 620.89}
-[INFO|callbacks.py:310] 2024-07-16 17:20:52,115 >> {'loss': 0.0001, 'learning_rate': 2.7248e-07, 'epoch': 4.21, 'throughput': 620.82}
-[INFO|callbacks.py:310] 2024-07-16 17:21:03,096 >> {'loss': 0.0002, 'learning_rate': 2.5301e-07, 'epoch': 4.23, 'throughput': 620.76}
-[INFO|callbacks.py:310] 2024-07-16 17:21:14,072 >> {'loss': 0.0001, 'learning_rate': 2.3423e-07, 'epoch': 4.26, 'throughput': 620.84}
-[INFO|callbacks.py:310] 2024-07-16 17:21:25,062 >> {'loss': 0.0003, 'learning_rate': 2.1614e-07, 'epoch': 4.28, 'throughput': 620.65}
-[INFO|callbacks.py:310] 2024-07-16 17:21:36,062 >> {'loss': 0.0001, 'learning_rate': 1.9874e-07, 'epoch': 4.31, 'throughput': 620.79}
-[INFO|callbacks.py:310] 2024-07-16 17:21:47,070 >> {'loss': 0.0007, 'learning_rate': 1.8204e-07, 'epoch': 4.34, 'throughput': 620.70}
-[INFO|callbacks.py:310] 2024-07-16 17:21:58,075 >> {'loss': 0.0003, 'learning_rate': 1.6605e-07, 'epoch': 4.36, 'throughput': 620.56}
-[INFO|callbacks.py:310] 2024-07-16 17:22:09,064 >> {'loss': 0.0001, 'learning_rate': 1.5077e-07, 'epoch': 4.39, 'throughput': 620.70}
-[INFO|callbacks.py:310] 2024-07-16 17:22:20,044 >> {'loss': 0.0008, 'learning_rate': 1.3620e-07, 'epoch': 4.41, 'throughput': 620.90}
-[INFO|callbacks.py:310] 2024-07-16 17:22:31,005 >> {'loss': 0.0001, 'learning_rate': 1.2236e-07, 'epoch': 4.44, 'throughput': 620.92}
-[INFO|callbacks.py:310] 2024-07-16 17:22:41,991 >> {'loss': 0.0004, 'learning_rate': 1.0924e-07, 'epoch': 4.46, 'throughput': 621.02}
-[INFO|callbacks.py:310] 2024-07-16 17:22:52,983 >> {'loss': 0.0001, 'learning_rate': 9.6846e-08, 'epoch': 4.49, 'throughput': 620.99}
-[INFO|callbacks.py:310] 2024-07-16 17:23:03,992 >> {'loss': 0.0005, 'learning_rate': 8.5185e-08, 'epoch': 4.52, 'throughput': 621.03}
-[INFO|callbacks.py:310] 2024-07-16 17:23:14,988 >> {'loss': 0.0076, 'learning_rate': 7.4261e-08, 'epoch': 4.54, 'throughput': 621.25}
-[INFO|callbacks.py:310] 2024-07-16 17:23:26,004 >> {'loss': 0.0004, 'learning_rate': 6.4075e-08, 'epoch': 4.57, 'throughput': 621.12}
-[INFO|callbacks.py:310] 2024-07-16 17:23:37,007 >> {'loss': 0.0040, 'learning_rate': 5.4631e-08, 'epoch': 4.59, 'throughput': 621.16}
-[INFO|callbacks.py:310] 2024-07-16 17:23:47,992 >> {'loss': 0.0001, 'learning_rate': 4.5932e-08, 'epoch': 4.62, 'throughput': 621.22}
-[INFO|callbacks.py:310] 2024-07-16 17:23:58,970 >> {'loss': 0.0005, 'learning_rate': 3.7981e-08, 'epoch': 4.65, 'throughput': 621.17}
-[INFO|callbacks.py:310] 2024-07-16 17:24:09,940 >> {'loss': 0.0002, 'learning_rate': 3.0779e-08, 'epoch': 4.67, 'throughput': 621.23}
-[INFO|callbacks.py:310] 2024-07-16 17:24:20,909 >> {'loss': 0.0001, 'learning_rate': 2.4330e-08, 'epoch': 4.70, 'throughput': 621.25}
-[INFO|
-[INFO|
-[INFO|
-[INFO|
-[
-[INFO|
-{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+[INFO|attention.py:80] 2024-07-16 17:28:25,446 >> Using torch SDPA for faster training and inference.
+[INFO|loader.py:196] 2024-07-16 17:28:25,450 >> all params: 6,738,415,616
+[INFO|trainer.py:3788] 2024-07-16 17:28:25,557 >>
+***** Running Prediction *****
+[INFO|trainer.py:3790] 2024-07-16 17:28:25,557 >> Num examples = 1253
+[INFO|trainer.py:3793] 2024-07-16 17:28:25,557 >> Batch size = 2
+[WARNING|logging.py:328] 2024-07-16 17:28:26,227 >> We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:26 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/16/2024 17:28:26 - INFO - llamafactory.model.loader - all params: 6,738,415,616
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 17:28:27 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+[INFO|trainer.py:127] 2024-07-16 17:28:35,754 >> Saving prediction results to saves/LLaMA2-7B-Chat/full/eval_2024-07-16-17-27-37/generated_predictions.jsonl
trainer_log.jsonl
CHANGED
@@ -1,191 +1,15 @@
|
|
1 |
-
{"current_steps":
|
2 |
-
{"current_steps":
|
3 |
-
{"current_steps":
|
4 |
-
{"current_steps":
|
5 |
-
{"current_steps":
|
6 |
-
{"current_steps":
|
7 |
-
{"current_steps":
|
8 |
-
{"current_steps":
|
9 |
-
{"current_steps":
|
10 |
-
{"current_steps":
|
11 |
-
{"current_steps":
|
12 |
-
{"current_steps":
|
13 |
-
{"current_steps":
|
14 |
-
{"current_steps":
|
15 |
-
{"current_steps":
- {"current_steps": 16, "total_steps": 190, "loss": 0.202, "learning_rate": 4.986304738420684e-06, "epoch": 0.4129032258064516, "percentage": 8.42, "elapsed_time": "0:02:57", "remaining_time": "0:32:11", "throughput": "618.05", "total_tokens": 109792}
- {"current_steps": 17, "total_steps": 190, "loss": 0.1981, "learning_rate": 4.981365379103306e-06, "epoch": 0.43870967741935485, "percentage": 8.95, "elapsed_time": "0:03:08", "remaining_time": "0:31:59", "throughput": "617.73", "total_tokens": 116528}
- {"current_steps": 18, "total_steps": 190, "loss": 0.1517, "learning_rate": 4.975670171853926e-06, "epoch": 0.4645161290322581, "percentage": 9.47, "elapsed_time": "0:03:19", "remaining_time": "0:31:47", "throughput": "617.10", "total_tokens": 123200}
- {"current_steps": 19, "total_steps": 190, "loss": 0.4335, "learning_rate": 4.9692208514878445e-06, "epoch": 0.49032258064516127, "percentage": 10.0, "elapsed_time": "0:03:30", "remaining_time": "0:31:35", "throughput": "617.28", "total_tokens": 130032}
- {"current_steps": 20, "total_steps": 190, "loss": 0.3609, "learning_rate": 4.962019382530521e-06, "epoch": 0.5161290322580645, "percentage": 10.53, "elapsed_time": "0:03:41", "remaining_time": "0:31:24", "throughput": "617.29", "total_tokens": 136832}
- {"current_steps": 21, "total_steps": 190, "loss": 0.1708, "learning_rate": 4.9540679586191605e-06, "epoch": 0.5419354838709678, "percentage": 11.05, "elapsed_time": "0:03:52", "remaining_time": "0:31:12", "throughput": "618.54", "total_tokens": 143904}
- {"current_steps": 22, "total_steps": 190, "loss": 0.2277, "learning_rate": 4.9453690018345144e-06, "epoch": 0.567741935483871, "percentage": 11.58, "elapsed_time": "0:04:03", "remaining_time": "0:31:00", "throughput": "617.70", "total_tokens": 150496}
- {"current_steps": 23, "total_steps": 190, "loss": 0.3437, "learning_rate": 4.935925161963089e-06, "epoch": 0.5935483870967742, "percentage": 12.11, "elapsed_time": "0:04:14", "remaining_time": "0:30:49", "throughput": "617.93", "total_tokens": 157360}
- {"current_steps": 24, "total_steps": 190, "loss": 0.2229, "learning_rate": 4.925739315689991e-06, "epoch": 0.6193548387096774, "percentage": 12.63, "elapsed_time": "0:04:25", "remaining_time": "0:30:37", "throughput": "619.02", "total_tokens": 164464}
- {"current_steps": 25, "total_steps": 190, "loss": 0.1242, "learning_rate": 4.914814565722671e-06, "epoch": 0.6451612903225806, "percentage": 13.16, "elapsed_time": "0:04:36", "remaining_time": "0:30:26", "throughput": "617.82", "total_tokens": 170960}
- {"current_steps": 26, "total_steps": 190, "loss": 0.2117, "learning_rate": 4.903154239845798e-06, "epoch": 0.6709677419354839, "percentage": 13.68, "elapsed_time": "0:04:47", "remaining_time": "0:30:15", "throughput": "617.94", "total_tokens": 177808}
- {"current_steps": 27, "total_steps": 190, "loss": 0.2706, "learning_rate": 4.890761889907589e-06, "epoch": 0.6967741935483871, "percentage": 14.21, "elapsed_time": "0:04:58", "remaining_time": "0:30:03", "throughput": "618.70", "total_tokens": 184848}
- {"current_steps": 28, "total_steps": 190, "loss": 0.2084, "learning_rate": 4.8776412907378845e-06, "epoch": 0.7225806451612903, "percentage": 14.74, "elapsed_time": "0:05:09", "remaining_time": "0:29:52", "throughput": "618.27", "total_tokens": 191536}
- {"current_steps": 29, "total_steps": 190, "loss": 0.0981, "learning_rate": 4.863796438998293e-06, "epoch": 0.7483870967741936, "percentage": 15.26, "elapsed_time": "0:05:20", "remaining_time": "0:29:40", "throughput": "618.39", "total_tokens": 198368}
- {"current_steps": 30, "total_steps": 190, "loss": 0.16, "learning_rate": 4.849231551964771e-06, "epoch": 0.7741935483870968, "percentage": 15.79, "elapsed_time": "0:05:31", "remaining_time": "0:29:29", "throughput": "618.50", "total_tokens": 205200}
- {"current_steps": 31, "total_steps": 190, "loss": 0.1614, "learning_rate": 4.833951066243004e-06, "epoch": 0.8, "percentage": 16.32, "elapsed_time": "0:05:42", "remaining_time": "0:29:18", "throughput": "617.80", "total_tokens": 211760}
- {"current_steps": 32, "total_steps": 190, "loss": 0.1742, "learning_rate": 4.817959636416969e-06, "epoch": 0.8258064516129032, "percentage": 16.84, "elapsed_time": "0:05:53", "remaining_time": "0:29:06", "throughput": "617.47", "total_tokens": 218448}
- {"current_steps": 33, "total_steps": 190, "loss": 0.1107, "learning_rate": 4.801262133631101e-06, "epoch": 0.8516129032258064, "percentage": 17.37, "elapsed_time": "0:06:04", "remaining_time": "0:28:55", "throughput": "617.90", "total_tokens": 225408}
- {"current_steps": 34, "total_steps": 190, "loss": 0.0822, "learning_rate": 4.783863644106502e-06, "epoch": 0.8774193548387097, "percentage": 17.89, "elapsed_time": "0:06:15", "remaining_time": "0:28:44", "throughput": "617.42", "total_tokens": 232032}
- {"current_steps": 35, "total_steps": 190, "loss": 0.1873, "learning_rate": 4.765769467591626e-06, "epoch": 0.9032258064516129, "percentage": 18.42, "elapsed_time": "0:06:26", "remaining_time": "0:28:33", "throughput": "617.01", "total_tokens": 238672}
- {"current_steps": 36, "total_steps": 190, "loss": 0.2375, "learning_rate": 4.746985115747918e-06, "epoch": 0.9290322580645162, "percentage": 18.95, "elapsed_time": "0:06:37", "remaining_time": "0:28:21", "throughput": "616.94", "total_tokens": 245456}
- {"current_steps": 37, "total_steps": 190, "loss": 0.2667, "learning_rate": 4.72751631047092e-06, "epoch": 0.9548387096774194, "percentage": 19.47, "elapsed_time": "0:06:48", "remaining_time": "0:28:10", "throughput": "617.73", "total_tokens": 252576}
- {"current_steps": 38, "total_steps": 190, "loss": 0.1547, "learning_rate": 4.707368982147318e-06, "epoch": 0.9806451612903225, "percentage": 20.0, "elapsed_time": "0:06:59", "remaining_time": "0:27:59", "throughput": "618.14", "total_tokens": 259536}
- {"current_steps": 39, "total_steps": 190, "loss": 0.1662, "learning_rate": 4.68654926784849e-06, "epoch": 1.0064516129032257, "percentage": 20.53, "elapsed_time": "0:07:10", "remaining_time": "0:27:48", "throughput": "618.69", "total_tokens": 266560}
- {"current_steps": 40, "total_steps": 190, "loss": 0.0808, "learning_rate": 4.665063509461098e-06, "epoch": 1.032258064516129, "percentage": 21.05, "elapsed_time": "0:07:21", "remaining_time": "0:27:36", "throughput": "618.41", "total_tokens": 273248}
- {"current_steps": 41, "total_steps": 190, "loss": 0.0884, "learning_rate": 4.642918251755281e-06, "epoch": 1.0580645161290323, "percentage": 21.58, "elapsed_time": "0:07:32", "remaining_time": "0:27:25", "throughput": "618.04", "total_tokens": 279888}
- {"current_steps": 42, "total_steps": 190, "loss": 0.0883, "learning_rate": 4.620120240391065e-06, "epoch": 1.0838709677419356, "percentage": 22.11, "elapsed_time": "0:07:43", "remaining_time": "0:27:14", "throughput": "618.55", "total_tokens": 286944}
- {"current_steps": 43, "total_steps": 190, "loss": 0.0562, "learning_rate": 4.596676419863561e-06, "epoch": 1.1096774193548387, "percentage": 22.63, "elapsed_time": "0:07:54", "remaining_time": "0:27:03", "throughput": "618.59", "total_tokens": 293792}
- {"current_steps": 44, "total_steps": 190, "loss": 0.0856, "learning_rate": 4.572593931387604e-06, "epoch": 1.135483870967742, "percentage": 23.16, "elapsed_time": "0:08:05", "remaining_time": "0:26:52", "throughput": "618.38", "total_tokens": 300512}
- {"current_steps": 45, "total_steps": 190, "loss": 0.0612, "learning_rate": 4.54788011072248e-06, "epoch": 1.1612903225806452, "percentage": 23.68, "elapsed_time": "0:08:16", "remaining_time": "0:26:41", "throughput": "618.26", "total_tokens": 307264}
- {"current_steps": 46, "total_steps": 190, "loss": 0.0944, "learning_rate": 4.522542485937369e-06, "epoch": 1.1870967741935483, "percentage": 24.21, "elapsed_time": "0:08:27", "remaining_time": "0:26:30", "throughput": "618.35", "total_tokens": 314112}
- {"current_steps": 47, "total_steps": 190, "loss": 0.0624, "learning_rate": 4.496588775118232e-06, "epoch": 1.2129032258064516, "percentage": 24.74, "elapsed_time": "0:08:39", "remaining_time": "0:26:19", "throughput": "618.23", "total_tokens": 320864}
- {"current_steps": 48, "total_steps": 190, "loss": 0.0363, "learning_rate": 4.470026884016805e-06, "epoch": 1.238709677419355, "percentage": 25.26, "elapsed_time": "0:08:49", "remaining_time": "0:26:07", "throughput": "618.21", "total_tokens": 327648}
- {"current_steps": 49, "total_steps": 190, "loss": 0.1039, "learning_rate": 4.442864903642428e-06, "epoch": 1.2645161290322582, "percentage": 25.79, "elapsed_time": "0:09:01", "remaining_time": "0:25:56", "throughput": "618.16", "total_tokens": 334432}
- {"current_steps": 50, "total_steps": 190, "loss": 0.0488, "learning_rate": 4.415111107797445e-06, "epoch": 1.2903225806451613, "percentage": 26.32, "elapsed_time": "0:09:12", "remaining_time": "0:25:45", "throughput": "618.19", "total_tokens": 341248}
- {"current_steps": 51, "total_steps": 190, "loss": 0.0613, "learning_rate": 4.386773950556931e-06, "epoch": 1.3161290322580645, "percentage": 26.84, "elapsed_time": "0:09:23", "remaining_time": "0:25:34", "throughput": "618.44", "total_tokens": 348192}
- {"current_steps": 52, "total_steps": 190, "loss": 0.07, "learning_rate": 4.357862063693486e-06, "epoch": 1.3419354838709676, "percentage": 27.37, "elapsed_time": "0:09:34", "remaining_time": "0:25:23", "throughput": "618.12", "total_tokens": 354816}
- {"current_steps": 53, "total_steps": 190, "loss": 0.0463, "learning_rate": 4.328384254047927e-06, "epoch": 1.367741935483871, "percentage": 27.89, "elapsed_time": "0:09:45", "remaining_time": "0:25:12", "throughput": "618.08", "total_tokens": 361600}
- {"current_steps": 54, "total_steps": 190, "loss": 0.0671, "learning_rate": 4.2983495008466285e-06, "epoch": 1.3935483870967742, "percentage": 28.42, "elapsed_time": "0:09:56", "remaining_time": "0:25:01", "throughput": "618.15", "total_tokens": 368464}
- {"current_steps": 55, "total_steps": 190, "loss": 0.0428, "learning_rate": 4.267766952966369e-06, "epoch": 1.4193548387096775, "percentage": 28.95, "elapsed_time": "0:10:07", "remaining_time": "0:24:50", "throughput": "618.43", "total_tokens": 375440}
- {"current_steps": 56, "total_steps": 190, "loss": 0.0678, "learning_rate": 4.236645926147493e-06, "epoch": 1.4451612903225808, "percentage": 29.47, "elapsed_time": "0:10:18", "remaining_time": "0:24:38", "throughput": "618.43", "total_tokens": 382240}
- {"current_steps": 57, "total_steps": 190, "loss": 0.0476, "learning_rate": 4.204995900156247e-06, "epoch": 1.4709677419354839, "percentage": 30.0, "elapsed_time": "0:10:29", "remaining_time": "0:24:27", "throughput": "618.38", "total_tokens": 389008}
- {"current_steps": 58, "total_steps": 190, "loss": 0.0442, "learning_rate": 4.172826515897146e-06, "epoch": 1.4967741935483871, "percentage": 30.53, "elapsed_time": "0:10:40", "remaining_time": "0:24:16", "throughput": "618.82", "total_tokens": 396080}
- {"current_steps": 59, "total_steps": 190, "loss": 0.0336, "learning_rate": 4.140147572476269e-06, "epoch": 1.5225806451612902, "percentage": 31.05, "elapsed_time": "0:10:51", "remaining_time": "0:24:05", "throughput": "619.09", "total_tokens": 403056}
- {"current_steps": 60, "total_steps": 190, "loss": 0.046, "learning_rate": 4.106969024216348e-06, "epoch": 1.5483870967741935, "percentage": 31.58, "elapsed_time": "0:11:02", "remaining_time": "0:23:54", "throughput": "618.77", "total_tokens": 409664}
- {"current_steps": 61, "total_steps": 190, "loss": 0.0416, "learning_rate": 4.073300977624594e-06, "epoch": 1.5741935483870968, "percentage": 32.11, "elapsed_time": "0:11:13", "remaining_time": "0:23:43", "throughput": "618.46", "total_tokens": 416272}
- {"current_steps": 62, "total_steps": 190, "loss": 0.0649, "learning_rate": 4.039153688314146e-06, "epoch": 1.6, "percentage": 32.63, "elapsed_time": "0:11:24", "remaining_time": "0:23:32", "throughput": "618.87", "total_tokens": 423360}
- {"current_steps": 63, "total_steps": 190, "loss": 0.0591, "learning_rate": 4.0045375578801216e-06, "epoch": 1.6258064516129034, "percentage": 33.16, "elapsed_time": "0:11:35", "remaining_time": "0:23:21", "throughput": "619.05", "total_tokens": 430304}
- {"current_steps": 64, "total_steps": 190, "loss": 0.0318, "learning_rate": 3.969463130731183e-06, "epoch": 1.6516129032258065, "percentage": 33.68, "elapsed_time": "0:11:46", "remaining_time": "0:23:10", "throughput": "618.83", "total_tokens": 436960}
- {"current_steps": 65, "total_steps": 190, "loss": 0.0462, "learning_rate": 3.933941090877615e-06, "epoch": 1.6774193548387095, "percentage": 34.21, "elapsed_time": "0:11:57", "remaining_time": "0:22:59", "throughput": "618.87", "total_tokens": 443792}
- {"current_steps": 66, "total_steps": 190, "loss": 0.0465, "learning_rate": 3.897982258676867e-06, "epoch": 1.7032258064516128, "percentage": 34.74, "elapsed_time": "0:12:08", "remaining_time": "0:22:47", "throughput": "618.98", "total_tokens": 450672}
- {"current_steps": 67, "total_steps": 190, "loss": 0.0316, "learning_rate": 3.861597587537568e-06, "epoch": 1.729032258064516, "percentage": 35.26, "elapsed_time": "0:12:19", "remaining_time": "0:22:36", "throughput": "619.15", "total_tokens": 457616}
- {"current_steps": 68, "total_steps": 190, "loss": 0.1, "learning_rate": 3.824798160583012e-06, "epoch": 1.7548387096774194, "percentage": 35.79, "elapsed_time": "0:12:30", "remaining_time": "0:22:25", "throughput": "619.38", "total_tokens": 464592}
- {"current_steps": 69, "total_steps": 190, "loss": 0.0711, "learning_rate": 3.787595187275136e-06, "epoch": 1.7806451612903227, "percentage": 36.32, "elapsed_time": "0:12:41", "remaining_time": "0:22:14", "throughput": "619.25", "total_tokens": 471328}
- {"current_steps": 70, "total_steps": 190, "loss": 0.0494, "learning_rate": 3.7500000000000005e-06, "epoch": 1.8064516129032258, "percentage": 36.84, "elapsed_time": "0:12:52", "remaining_time": "0:22:03", "throughput": "619.69", "total_tokens": 478480}
- {"current_steps": 71, "total_steps": 190, "loss": 0.0618, "learning_rate": 3.7120240506158433e-06, "epoch": 1.832258064516129, "percentage": 37.37, "elapsed_time": "0:13:03", "remaining_time": "0:21:52", "throughput": "619.64", "total_tokens": 485264}
- {"current_steps": 72, "total_steps": 190, "loss": 0.0511, "learning_rate": 3.6736789069647273e-06, "epoch": 1.8580645161290321, "percentage": 37.89, "elapsed_time": "0:13:14", "remaining_time": "0:21:41", "throughput": "619.61", "total_tokens": 492064}
- {"current_steps": 73, "total_steps": 190, "loss": 0.0464, "learning_rate": 3.634976249348867e-06, "epoch": 1.8838709677419354, "percentage": 38.42, "elapsed_time": "0:13:25", "remaining_time": "0:21:30", "throughput": "619.64", "total_tokens": 498896}
- {"current_steps": 74, "total_steps": 190, "loss": 0.0331, "learning_rate": 3.595927866972694e-06, "epoch": 1.9096774193548387, "percentage": 38.95, "elapsed_time": "0:13:36", "remaining_time": "0:21:19", "throughput": "620.02", "total_tokens": 506016}
- {"current_steps": 75, "total_steps": 190, "loss": 0.0706, "learning_rate": 3.556545654351749e-06, "epoch": 1.935483870967742, "percentage": 39.47, "elapsed_time": "0:13:47", "remaining_time": "0:21:08", "throughput": "620.04", "total_tokens": 512848}
- {"current_steps": 76, "total_steps": 190, "loss": 0.0442, "learning_rate": 3.516841607689501e-06, "epoch": 1.9612903225806453, "percentage": 40.0, "elapsed_time": "0:13:58", "remaining_time": "0:20:57", "throughput": "620.14", "total_tokens": 519744}
- {"current_steps": 77, "total_steps": 190, "loss": 0.042, "learning_rate": 3.476827821223184e-06, "epoch": 1.9870967741935484, "percentage": 40.53, "elapsed_time": "0:14:09", "remaining_time": "0:20:46", "throughput": "619.89", "total_tokens": 526352}
- {"current_steps": 78, "total_steps": 190, "loss": 0.021, "learning_rate": 3.436516483539781e-06, "epoch": 2.0129032258064514, "percentage": 41.05, "elapsed_time": "0:14:20", "remaining_time": "0:20:35", "throughput": "619.84", "total_tokens": 533136}
- {"current_steps": 79, "total_steps": 190, "loss": 0.0094, "learning_rate": 3.39591987386325e-06, "epoch": 2.0387096774193547, "percentage": 41.58, "elapsed_time": "0:14:31", "remaining_time": "0:20:23", "throughput": "619.91", "total_tokens": 540016}
- {"current_steps": 80, "total_steps": 190, "loss": 0.0021, "learning_rate": 3.3550503583141726e-06, "epoch": 2.064516129032258, "percentage": 42.11, "elapsed_time": "0:14:42", "remaining_time": "0:20:12", "throughput": "620.26", "total_tokens": 547152}
- {"current_steps": 81, "total_steps": 190, "loss": 0.0146, "learning_rate": 3.313920386142892e-06, "epoch": 2.0903225806451613, "percentage": 42.63, "elapsed_time": "0:14:53", "remaining_time": "0:20:01", "throughput": "620.13", "total_tokens": 553888}
- {"current_steps": 82, "total_steps": 190, "loss": 0.0237, "learning_rate": 3.272542485937369e-06, "epoch": 2.1161290322580646, "percentage": 43.16, "elapsed_time": "0:15:04", "remaining_time": "0:19:50", "throughput": "620.48", "total_tokens": 561024}
- {"current_steps": 83, "total_steps": 190, "loss": 0.0031, "learning_rate": 3.230929261806842e-06, "epoch": 2.141935483870968, "percentage": 43.68, "elapsed_time": "0:15:15", "remaining_time": "0:19:39", "throughput": "620.57", "total_tokens": 567936}
- {"current_steps": 84, "total_steps": 190, "loss": 0.0034, "learning_rate": 3.189093389542498e-06, "epoch": 2.167741935483871, "percentage": 44.21, "elapsed_time": "0:15:26", "remaining_time": "0:19:28", "throughput": "620.54", "total_tokens": 574736}
- {"current_steps": 85, "total_steps": 190, "loss": 0.0045, "learning_rate": 3.147047612756302e-06, "epoch": 2.193548387096774, "percentage": 44.74, "elapsed_time": "0:15:37", "remaining_time": "0:19:17", "throughput": "620.66", "total_tokens": 581680}
- {"current_steps": 86, "total_steps": 190, "loss": 0.0031, "learning_rate": 3.1048047389991693e-06, "epoch": 2.2193548387096773, "percentage": 45.26, "elapsed_time": "0:15:48", "remaining_time": "0:19:06", "throughput": "620.62", "total_tokens": 588464}
- {"current_steps": 87, "total_steps": 190, "loss": 0.0341, "learning_rate": 3.062377635859663e-06, "epoch": 2.2451612903225806, "percentage": 45.79, "elapsed_time": "0:15:59", "remaining_time": "0:18:55", "throughput": "620.63", "total_tokens": 595312}
- {"current_steps": 88, "total_steps": 190, "loss": 0.0095, "learning_rate": 3.019779227044398e-06, "epoch": 2.270967741935484, "percentage": 46.32, "elapsed_time": "0:16:10", "remaining_time": "0:18:44", "throughput": "620.55", "total_tokens": 602080}
- {"current_steps": 89, "total_steps": 190, "loss": 0.0459, "learning_rate": 2.9770224884413625e-06, "epoch": 2.296774193548387, "percentage": 46.84, "elapsed_time": "0:16:21", "remaining_time": "0:18:33", "throughput": "620.80", "total_tokens": 609168}
- {"current_steps": 90, "total_steps": 190, "loss": 0.0104, "learning_rate": 2.9341204441673267e-06, "epoch": 2.3225806451612905, "percentage": 47.37, "elapsed_time": "0:16:32", "remaining_time": "0:18:22", "throughput": "620.80", "total_tokens": 616000}
- {"current_steps": 91, "total_steps": 190, "loss": 0.0201, "learning_rate": 2.8910861626005774e-06, "epoch": 2.3483870967741938, "percentage": 47.89, "elapsed_time": "0:16:43", "remaining_time": "0:18:11", "throughput": "620.51", "total_tokens": 622544}
- {"current_steps": 92, "total_steps": 190, "loss": 0.0021, "learning_rate": 2.847932752400164e-06, "epoch": 2.3741935483870966, "percentage": 48.42, "elapsed_time": "0:16:54", "remaining_time": "0:18:00", "throughput": "620.75", "total_tokens": 629600}
- {"current_steps": 93, "total_steps": 190, "loss": 0.043, "learning_rate": 2.804673358512869e-06, "epoch": 2.4, "percentage": 48.95, "elapsed_time": "0:17:05", "remaining_time": "0:17:49", "throughput": "620.64", "total_tokens": 636320}
- {"current_steps": 94, "total_steps": 190, "loss": 0.0207, "learning_rate": 2.761321158169134e-06, "epoch": 2.425806451612903, "percentage": 49.47, "elapsed_time": "0:17:16", "remaining_time": "0:17:38", "throughput": "620.61", "total_tokens": 643136}
- {"current_steps": 95, "total_steps": 190, "loss": 0.0148, "learning_rate": 2.717889356869146e-06, "epoch": 2.4516129032258065, "percentage": 50.0, "elapsed_time": "0:17:27", "remaining_time": "0:17:27", "throughput": "620.69", "total_tokens": 650048}
- {"current_steps": 96, "total_steps": 190, "loss": 0.004, "learning_rate": 2.6743911843603134e-06, "epoch": 2.47741935483871, "percentage": 50.53, "elapsed_time": "0:17:38", "remaining_time": "0:17:16", "throughput": "620.55", "total_tokens": 656736}
- {"current_steps": 97, "total_steps": 190, "loss": 0.0131, "learning_rate": 2.6308398906073603e-06, "epoch": 2.5032258064516126, "percentage": 51.05, "elapsed_time": "0:17:49", "remaining_time": "0:17:05", "throughput": "620.54", "total_tokens": 663552}
- {"current_steps": 98, "total_steps": 190, "loss": 0.0455, "learning_rate": 2.587248741756253e-06, "epoch": 2.5290322580645164, "percentage": 51.58, "elapsed_time": "0:18:00", "remaining_time": "0:16:54", "throughput": "620.37", "total_tokens": 670224}
- {"current_steps": 99, "total_steps": 190, "loss": 0.0031, "learning_rate": 2.543631016093209e-06, "epoch": 2.554838709677419, "percentage": 52.11, "elapsed_time": "0:18:11", "remaining_time": "0:16:43", "throughput": "620.27", "total_tokens": 676960}
- {"current_steps": 100, "total_steps": 190, "loss": 0.0099, "learning_rate": 2.5e-06, "epoch": 2.5806451612903225, "percentage": 52.63, "elapsed_time": "0:18:22", "remaining_time": "0:16:32", "throughput": "620.49", "total_tokens": 684016}
- {"current_steps": 101, "total_steps": 190, "loss": 0.0797, "learning_rate": 2.4563689839067913e-06, "epoch": 2.606451612903226, "percentage": 53.16, "elapsed_time": "0:18:33", "remaining_time": "0:16:21", "throughput": "620.49", "total_tokens": 690832}
- {"current_steps": 102, "total_steps": 190, "loss": 0.0059, "learning_rate": 2.4127512582437486e-06, "epoch": 2.632258064516129, "percentage": 53.68, "elapsed_time": "0:18:44", "remaining_time": "0:16:10", "throughput": "620.63", "total_tokens": 697808}
- {"current_steps": 103, "total_steps": 190, "loss": 0.0438, "learning_rate": 2.3691601093926406e-06, "epoch": 2.6580645161290324, "percentage": 54.21, "elapsed_time": "0:18:55", "remaining_time": "0:15:58", "throughput": "620.38", "total_tokens": 704352}
- {"current_steps": 104, "total_steps": 190, "loss": 0.0149, "learning_rate": 2.325608815639687e-06, "epoch": 2.6838709677419352, "percentage": 54.74, "elapsed_time": "0:19:06", "remaining_time": "0:15:47", "throughput": "620.63", "total_tokens": 711456}
- {"current_steps": 105, "total_steps": 190, "loss": 0.0126, "learning_rate": 2.2821106431308546e-06, "epoch": 2.709677419354839, "percentage": 55.26, "elapsed_time": "0:19:17", "remaining_time": "0:15:36", "throughput": "620.57", "total_tokens": 718224}
- {"current_steps": 106, "total_steps": 190, "loss": 0.0255, "learning_rate": 2.238678841830867e-06, "epoch": 2.735483870967742, "percentage": 55.79, "elapsed_time": "0:19:28", "remaining_time": "0:15:25", "throughput": "620.46", "total_tokens": 724928}
- {"current_steps": 107, "total_steps": 190, "loss": 0.0048, "learning_rate": 2.195326641487132e-06, "epoch": 2.761290322580645, "percentage": 56.32, "elapsed_time": "0:19:39", "remaining_time": "0:15:14", "throughput": "620.34", "total_tokens": 731616}
- {"current_steps": 108, "total_steps": 190, "loss": 0.0142, "learning_rate": 2.1520672475998374e-06, "epoch": 2.7870967741935484, "percentage": 56.84, "elapsed_time": "0:19:50", "remaining_time": "0:15:03", "throughput": "620.20", "total_tokens": 738272}
- {"current_steps": 109, "total_steps": 190, "loss": 0.0193, "learning_rate": 2.1089138373994226e-06, "epoch": 2.8129032258064517, "percentage": 57.37, "elapsed_time": "0:20:01", "remaining_time": "0:14:52", "throughput": "620.23", "total_tokens": 745104}
- {"current_steps": 110, "total_steps": 190, "loss": 0.0055, "learning_rate": 2.0658795558326745e-06, "epoch": 2.838709677419355, "percentage": 57.89, "elapsed_time": "0:20:12", "remaining_time": "0:14:41", "throughput": "620.32", "total_tokens": 752016}
- {"current_steps": 111, "total_steps": 190, "loss": 0.0144, "learning_rate": 2.022977511558638e-06, "epoch": 2.864516129032258, "percentage": 58.42, "elapsed_time": "0:20:23", "remaining_time": "0:14:30", "throughput": "620.23", "total_tokens": 758720}
- {"current_steps": 112, "total_steps": 190, "loss": 0.0272, "learning_rate": 1.9802207729556023e-06, "epoch": 2.8903225806451616, "percentage": 58.95, "elapsed_time": "0:20:34", "remaining_time": "0:14:19", "throughput": "620.22", "total_tokens": 765520}
- {"current_steps": 113, "total_steps": 190, "loss": 0.0101, "learning_rate": 1.937622364140338e-06, "epoch": 2.9161290322580644, "percentage": 59.47, "elapsed_time": "0:20:45", "remaining_time": "0:14:08", "throughput": "620.18", "total_tokens": 772288}
- {"current_steps": 114, "total_steps": 190, "loss": 0.0109, "learning_rate": 1.895195261000831e-06, "epoch": 2.9419354838709677, "percentage": 60.0, "elapsed_time": "0:20:56", "remaining_time": "0:13:57", "throughput": "620.51", "total_tokens": 779520}
- {"current_steps": 115, "total_steps": 190, "loss": 0.018, "learning_rate": 1.852952387243698e-06, "epoch": 2.967741935483871, "percentage": 60.53, "elapsed_time": "0:21:07", "remaining_time": "0:13:46", "throughput": "620.55", "total_tokens": 786400}
- {"current_steps": 116, "total_steps": 190, "loss": 0.0141, "learning_rate": 1.8109066104575023e-06, "epoch": 2.9935483870967743, "percentage": 61.05, "elapsed_time": "0:21:18", "remaining_time": "0:13:35", "throughput": "620.38", "total_tokens": 793008}
- {"current_steps": 117, "total_steps": 190, "loss": 0.0057, "learning_rate": 1.7690707381931585e-06, "epoch": 3.0193548387096776, "percentage": 61.58, "elapsed_time": "0:21:29", "remaining_time": "0:13:24", "throughput": "620.36", "total_tokens": 799808}
- {"current_steps": 118, "total_steps": 190, "loss": 0.0063, "learning_rate": 1.7274575140626318e-06, "epoch": 3.0451612903225804, "percentage": 62.11, "elapsed_time": "0:21:40", "remaining_time": "0:13:13", "throughput": "620.33", "total_tokens": 806576}
- {"current_steps": 119, "total_steps": 190, "loss": 0.0138, "learning_rate": 1.686079613857109e-06, "epoch": 3.0709677419354837, "percentage": 62.63, "elapsed_time": "0:21:51", "remaining_time": "0:13:02", "throughput": "620.27", "total_tokens": 813312}
- {"current_steps": 120, "total_steps": 190, "loss": 0.0011, "learning_rate": 1.6449496416858285e-06, "epoch": 3.096774193548387, "percentage": 63.16, "elapsed_time": "0:22:02", "remaining_time": "0:12:51", "throughput": "619.97", "total_tokens": 819728}
- {"current_steps": 121, "total_steps": 190, "loss": 0.0006, "learning_rate": 1.6040801261367494e-06, "epoch": 3.1225806451612903, "percentage": 63.68, "elapsed_time": "0:22:13", "remaining_time": "0:12:40", "throughput": "619.94", "total_tokens": 826496}
- {"current_steps": 122, "total_steps": 190, "loss": 0.0055, "learning_rate": 1.56348351646022e-06, "epoch": 3.1483870967741936, "percentage": 64.21, "elapsed_time": "0:22:24", "remaining_time": "0:12:29", "throughput": "619.97", "total_tokens": 833360}
- {"current_steps": 123, "total_steps": 190, "loss": 0.0011, "learning_rate": 1.5231721787768162e-06, "epoch": 3.174193548387097, "percentage": 64.74, "elapsed_time": "0:22:35", "remaining_time": "0:12:18", "throughput": "619.90", "total_tokens": 840080}
- {"current_steps": 124, "total_steps": 190, "loss": 0.0173, "learning_rate": 1.4831583923105e-06, "epoch": 3.2, "percentage": 65.26, "elapsed_time": "0:22:46", "remaining_time": "0:12:07", "throughput": "620.05", "total_tokens": 847104}
- {"current_steps": 125, "total_steps": 190, "loss": 0.0027, "learning_rate": 1.443454345648252e-06, "epoch": 3.225806451612903, "percentage": 65.79, "elapsed_time": "0:22:57", "remaining_time": "0:11:56", "throughput": "619.89", "total_tokens": 853712}
- {"current_steps": 126, "total_steps": 190, "loss": 0.0029, "learning_rate": 1.4040721330273063e-06, "epoch": 3.2516129032258063, "percentage": 66.32, "elapsed_time": "0:23:08", "remaining_time": "0:11:45", "throughput": "619.81", "total_tokens": 860400}
- {"current_steps": 127, "total_steps": 190, "loss": 0.0003, "learning_rate": 1.3650237506511333e-06, "epoch": 3.2774193548387096, "percentage": 66.84, "elapsed_time": "0:23:19", "remaining_time": "0:11:34", "throughput": "619.97", "total_tokens": 867440}
- {"current_steps": 128, "total_steps": 190, "loss": 0.0007, "learning_rate": 1.3263210930352737e-06, "epoch": 3.303225806451613, "percentage": 67.37, "elapsed_time": "0:23:30", "remaining_time": "0:11:23", "throughput": "619.98", "total_tokens": 874256}
- {"current_steps": 129, "total_steps": 190, "loss": 0.008, "learning_rate": 1.2879759493841577e-06, "epoch": 3.329032258064516, "percentage": 67.89, "elapsed_time": "0:23:41", "remaining_time": "0:11:11", "throughput": "620.05", "total_tokens": 881168}
- {"current_steps": 130, "total_steps": 190, "loss": 0.0004, "learning_rate": 1.2500000000000007e-06, "epoch": 3.3548387096774195, "percentage": 68.42, "elapsed_time": "0:23:52", "remaining_time": "0:11:00", "throughput": "620.16", "total_tokens": 888128}
- {"current_steps": 131, "total_steps": 190, "loss": 0.0049, "learning_rate": 1.2124048127248644e-06, "epoch": 3.3806451612903228, "percentage": 68.95, "elapsed_time": "0:24:03", "remaining_time": "0:10:49", "throughput": "620.39", "total_tokens": 895296}
- {"current_steps": 132, "total_steps": 190, "loss": 0.0012, "learning_rate": 1.1752018394169882e-06, "epoch": 3.4064516129032256, "percentage": 69.47, "elapsed_time": "0:24:14", "remaining_time": "0:10:38", "throughput": "620.30", "total_tokens": 901984}
- {"current_steps": 133, "total_steps": 190, "loss": 0.0044, "learning_rate": 1.1384024124624324e-06, "epoch": 3.432258064516129, "percentage": 70.0, "elapsed_time": "0:24:25", "remaining_time": "0:10:27", "throughput": "620.50", "total_tokens": 909104}
- {"current_steps": 134, "total_steps": 190, "loss": 0.0017, "learning_rate": 1.1020177413231334e-06, "epoch": 3.458064516129032, "percentage": 70.53, "elapsed_time": "0:24:36", "remaining_time": "0:10:16", "throughput": "620.57", "total_tokens": 916032}
- {"current_steps": 135, "total_steps": 190, "loss": 0.0003, "learning_rate": 1.0660589091223854e-06, "epoch": 3.4838709677419355, "percentage": 71.05, "elapsed_time": "0:24:47", "remaining_time": "0:10:05", "throughput": "620.49", "total_tokens": 922736}
- {"current_steps": 136, "total_steps": 190, "loss": 0.0099, "learning_rate": 1.0305368692688175e-06, "epoch": 3.509677419354839, "percentage": 71.58, "elapsed_time": "0:24:58", "remaining_time": "0:09:54", "throughput": "620.45", "total_tokens": 929472}
- {"current_steps": 137, "total_steps": 190, "loss": 0.0068, "learning_rate": 9.95462442119879e-07, "epoch": 3.535483870967742, "percentage": 72.11, "elapsed_time": "0:25:09", "remaining_time": "0:09:43", "throughput": "620.34", "total_tokens": 936112}
- {"current_steps": 138, "total_steps": 190, "loss": 0.0025, "learning_rate": 9.608463116858544e-07, "epoch": 3.5612903225806454, "percentage": 72.63, "elapsed_time": "0:25:20", "remaining_time": "0:09:32", "throughput": "620.33", "total_tokens": 942912}
- {"current_steps": 139, "total_steps": 190, "loss": 0.0004, "learning_rate": 9.266990223754069e-07, "epoch": 3.587096774193548, "percentage": 73.16, "elapsed_time": "0:25:30", "remaining_time": "0:09:21", "throughput": "620.50", "total_tokens": 949984}
- {"current_steps": 140, "total_steps": 190, "loss": 0.0101, "learning_rate": 8.930309757836517e-07, "epoch": 3.6129032258064515, "percentage": 73.68, "elapsed_time": "0:25:41", "remaining_time": "0:09:10", "throughput": "620.37", "total_tokens": 956608}
- {"current_steps": 141, "total_steps": 190, "loss": 0.0068, "learning_rate": 8.598524275237321e-07, "epoch": 3.638709677419355, "percentage": 74.21, "elapsed_time": "0:25:52", "remaining_time": "0:08:59", "throughput": "620.41", "total_tokens": 963488}
- {"current_steps": 142, "total_steps": 190, "loss": 0.0007, "learning_rate": 8.271734841028553e-07, "epoch": 3.664516129032258, "percentage": 74.74, "elapsed_time": "0:26:03", "remaining_time": "0:08:48", "throughput": "620.29", "total_tokens": 970128}
- {"current_steps": 143, "total_steps": 190, "loss": 0.0161, "learning_rate": 7.950040998437541e-07, "epoch": 3.6903225806451614, "percentage": 75.26, "elapsed_time": "0:26:14", "remaining_time": "0:08:37", "throughput": "620.19", "total_tokens": 976800}
- {"current_steps": 144, "total_steps": 190, "loss": 0.0115, "learning_rate": 7.633540738525066e-07, "epoch": 3.7161290322580647, "percentage": 75.79, "elapsed_time": "0:26:25", "remaining_time": "0:08:26", "throughput": "620.44", "total_tokens": 984000}
- {"current_steps": 145, "total_steps": 190, "loss": 0.0052, "learning_rate": 7.322330470336314e-07, "epoch": 3.741935483870968, "percentage": 76.32, "elapsed_time": "0:26:36", "remaining_time": "0:08:15", "throughput": "620.49", "total_tokens": 990896}
- {"current_steps": 146, "total_steps": 190, "loss": 0.0098, "learning_rate": 7.016504991533727e-07, "epoch": 3.767741935483871, "percentage": 76.84, "elapsed_time": "0:26:47", "remaining_time": "0:08:04", "throughput": "620.56", "total_tokens": 997824}
- {"current_steps": 147, "total_steps": 190, "loss": 0.0005, "learning_rate": 6.716157459520739e-07, "epoch": 3.793548387096774, "percentage": 77.37, "elapsed_time": "0:26:58", "remaining_time": "0:07:53", "throughput": "620.74", "total_tokens": 1004928}
- {"current_steps": 148, "total_steps": 190, "loss": 0.0012, "learning_rate": 6.421379363065142e-07, "epoch": 3.8193548387096774, "percentage": 77.89, "elapsed_time": "0:27:09", "remaining_time": "0:07:42", "throughput": "620.71", "total_tokens": 1011696}
- {"current_steps": 149, "total_steps": 190, "loss": 0.0013, "learning_rate": 6.1322604944307e-07, "epoch": 3.8451612903225807, "percentage": 78.42, "elapsed_time": "0:27:20", "remaining_time": "0:07:31", "throughput": "620.61", "total_tokens": 1018368}
- {"current_steps": 150, "total_steps": 190, "loss": 0.0003, "learning_rate": 5.848888922025553e-07, "epoch": 3.870967741935484, "percentage": 78.95, "elapsed_time": "0:27:31", "remaining_time": "0:07:20", "throughput": "620.76", "total_tokens": 1025424}
- {"current_steps": 151, "total_steps": 190, "loss": 0.0026, "learning_rate": 5.571350963575728e-07, "epoch": 3.896774193548387, "percentage": 79.47, "elapsed_time": "0:27:42", "remaining_time": "0:07:09", "throughput": "620.61", "total_tokens": 1032016}
- {"current_steps": 152, "total_steps": 190, "loss": 0.0097, "learning_rate": 5.299731159831953e-07, "epoch": 3.9225806451612906, "percentage": 80.0, "elapsed_time": "0:27:53", "remaining_time": "0:06:58", "throughput": "620.67", "total_tokens": 1038928}
- {"current_steps": 153, "total_steps": 190, "loss": 0.0047, "learning_rate": 5.034112248817685e-07, "epoch": 3.9483870967741934, "percentage": 80.53, "elapsed_time": "0:28:04", "remaining_time": "0:06:47", "throughput": "620.62", "total_tokens": 1045664}
- {"current_steps": 154, "total_steps": 190, "loss": 0.0081, "learning_rate": 4.774575140626317e-07, "epoch": 3.9741935483870967, "percentage": 81.05, "elapsed_time": "0:28:15", "remaining_time": "0:06:36", "throughput": "620.72", "total_tokens": 1052640}
- {"current_steps": 155, "total_steps": 190, "loss": 0.0018, "learning_rate": 4.5211988927752026e-07, "epoch": 4.0, "percentage": 81.58, "elapsed_time": "0:28:26", "remaining_time": "0:06:25", "throughput": "620.95", "total_tokens": 1059840}
- {"current_steps": 156, "total_steps": 190, "loss": 0.0053, "learning_rate": 4.27406068612396e-07, "epoch": 4.025806451612903, "percentage": 82.11, "elapsed_time": "0:28:37", "remaining_time": "0:06:14", "throughput": "620.97", "total_tokens": 1066704}
- {"current_steps": 157, "total_steps": 190, "loss": 0.0005, "learning_rate": 4.033235801364402e-07, "epoch": 4.051612903225807, "percentage": 82.63, "elapsed_time": "0:28:48", "remaining_time": "0:06:03", "throughput": "621.01", "total_tokens": 1073600}
- {"current_steps": 158, "total_steps": 190, "loss": 0.0001, "learning_rate": 3.798797596089351e-07, "epoch": 4.077419354838709, "percentage": 83.16, "elapsed_time": "0:28:59", "remaining_time": "0:05:52", "throughput": "620.88", "total_tokens": 1080208}
- {"current_steps": 159, "total_steps": 190, "loss": 0.0018, "learning_rate": 3.5708174824471947e-07, "epoch": 4.103225806451613, "percentage": 83.68, "elapsed_time": "0:29:10", "remaining_time": "0:05:41", "throughput": "620.79", "total_tokens": 1086880}
- {"current_steps": 160, "total_steps": 190, "loss": 0.001, "learning_rate": 3.3493649053890325e-07, "epoch": 4.129032258064516, "percentage": 84.21, "elapsed_time": "0:29:21", "remaining_time": "0:05:30", "throughput": "620.68", "total_tokens": 1093520}
- {"current_steps": 161, "total_steps": 190, "loss": 0.0001, "learning_rate": 3.134507321515107e-07, "epoch": 4.15483870967742, "percentage": 84.74, "elapsed_time": "0:29:32", "remaining_time": "0:05:19", "throughput": "620.79", "total_tokens": 1100528}
- {"current_steps": 162, "total_steps": 190, "loss": 0.0012, "learning_rate": 2.9263101785268253e-07, "epoch": 4.180645161290323, "percentage": 85.26, "elapsed_time": "0:29:43", "remaining_time": "0:05:08", "throughput": "620.89", "total_tokens": 1107520}
- {"current_steps": 163, "total_steps": 190, "loss": 0.0001, "learning_rate": 2.7248368952908055e-07, "epoch": 4.2064516129032254, "percentage": 85.79, "elapsed_time": "0:29:54", "remaining_time": "0:04:57", "throughput": "620.82", "total_tokens": 1114208}
- {"current_steps": 164, "total_steps": 190, "loss": 0.0002, "learning_rate": 2.53014884252083e-07, "epoch": 4.232258064516129, "percentage": 86.32, "elapsed_time": "0:30:05", "remaining_time": "0:04:46", "throughput": "620.76", "total_tokens": 1120912}
- {"current_steps": 165, "total_steps": 190, "loss": 0.0001, "learning_rate": 2.3423053240837518e-07, "epoch": 4.258064516129032, "percentage": 86.84, "elapsed_time": "0:30:16", "remaining_time": "0:04:35", "throughput": "620.84", "total_tokens": 1127872}
- {"current_steps": 166, "total_steps": 190, "loss": 0.0003, "learning_rate": 2.1613635589349756e-07, "epoch": 4.283870967741936, "percentage": 87.37, "elapsed_time": "0:30:27", "remaining_time": "0:04:24", "throughput": "620.65", "total_tokens": 1134352}
- {"current_steps": 167, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.9873786636889908e-07, "epoch": 4.309677419354839, "percentage": 87.89, "elapsed_time": "0:30:38", "remaining_time": "0:04:13", "throughput": "620.79", "total_tokens": 1141440}
- {"current_steps": 168, "total_steps": 190, "loss": 0.0007, "learning_rate": 1.8204036358303173e-07, "epoch": 4.335483870967742, "percentage": 88.42, "elapsed_time": "0:30:49", "remaining_time": "0:04:02", "throughput": "620.70", "total_tokens": 1148096}
- {"current_steps": 169, "total_steps": 190, "loss": 0.0003, "learning_rate": 1.6604893375699594e-07, "epoch": 4.361290322580645, "percentage": 88.95, "elapsed_time": "0:31:00", "remaining_time": "0:03:51", "throughput": "620.56", "total_tokens": 1154672}
- {"current_steps": 170, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.507684480352292e-07, "epoch": 4.387096774193548, "percentage": 89.47, "elapsed_time": "0:31:11", "remaining_time": "0:03:40", "throughput": "620.70", "total_tokens": 1161744}
- {"current_steps": 171, "total_steps": 190, "loss": 0.0008, "learning_rate": 1.362035610017079e-07, "epoch": 4.412903225806452, "percentage": 90.0, "elapsed_time": "0:31:22", "remaining_time": "0:03:29", "throughput": "620.90", "total_tokens": 1168944}
- {"current_steps": 172, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.223587092621162e-07, "epoch": 4.438709677419355, "percentage": 90.53, "elapsed_time": "0:31:33", "remaining_time": "0:03:18", "throughput": "620.92", "total_tokens": 1175792}
- {"current_steps": 173, "total_steps": 190, "loss": 0.0004, "learning_rate": 1.0923811009241142e-07, "epoch": 4.464516129032258, "percentage": 91.05, "elapsed_time": "0:31:44", "remaining_time": "0:03:07", "throughput": "621.02", "total_tokens": 1182800}
- {"current_steps": 174, "total_steps": 190, "loss": 0.0001, "learning_rate": 9.684576015420277e-08, "epoch": 4.490322580645161, "percentage": 91.58, "elapsed_time": "0:31:55", "remaining_time": "0:02:56", "throughput": "620.99", "total_tokens": 1189568}
- {"current_steps": 175, "total_steps": 190, "loss": 0.0005, "learning_rate": 8.518543427732951e-08, "epoch": 4.516129032258064, "percentage": 92.11, "elapsed_time": "0:32:06", "remaining_time": "0:02:45", "throughput": "621.03", "total_tokens": 1196480}
- {"current_steps": 176, "total_steps": 190, "loss": 0.0076, "learning_rate": 7.426068431000883e-08, "epoch": 4.541935483870968, "percentage": 92.63, "elapsed_time": "0:32:17", "remaining_time": "0:02:34", "throughput": "621.25", "total_tokens": 1203728}
- {"current_steps": 177, "total_steps": 190, "loss": 0.0004, "learning_rate": 6.407483803691216e-08, "epoch": 4.567741935483871, "percentage": 93.16, "elapsed_time": "0:32:28", "remaining_time": "0:02:23", "throughput": "621.12", "total_tokens": 1210320}
- {"current_steps": 178, "total_steps": 190, "loss": 0.004, "learning_rate": 5.463099816548578e-08, "epoch": 4.593548387096774, "percentage": 93.68, "elapsed_time": "0:32:39", "remaining_time": "0:02:12", "throughput": "621.16", "total_tokens": 1217248}
- {"current_steps": 179, "total_steps": 190, "loss": 0.0001, "learning_rate": 4.593204138084006e-08, "epoch": 4.619354838709677, "percentage": 94.21, "elapsed_time": "0:32:50", "remaining_time": "0:02:01", "throughput": "621.22", "total_tokens": 1224192}
- {"current_steps": 180, "total_steps": 190, "loss": 0.0005, "learning_rate": 3.798061746947995e-08, "epoch": 4.645161290322581, "percentage": 94.74, "elapsed_time": "0:33:01", "remaining_time": "0:01:50", "throughput": "621.17", "total_tokens": 1230896}
- {"current_steps": 181, "total_steps": 190, "loss": 0.0002, "learning_rate": 3.077914851215585e-08, "epoch": 4.670967741935484, "percentage": 95.26, "elapsed_time": "0:33:12", "remaining_time": "0:01:39", "throughput": "621.23", "total_tokens": 1237840}
- {"current_steps": 182, "total_steps": 190, "loss": 0.0001, "learning_rate": 2.4329828146074096e-08, "epoch": 4.6967741935483875, "percentage": 95.79, "elapsed_time": "0:33:23", "remaining_time": "0:01:28", "throughput": "621.25", "total_tokens": 1244688}
- {"current_steps": 183, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.8634620896695044e-08, "epoch": 4.72258064516129, "percentage": 96.32, "elapsed_time": "0:33:34", "remaining_time": "0:01:17", "throughput": "621.28", "total_tokens": 1251584}
- {"current_steps": 184, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.3695261579316776e-08, "epoch": 4.748387096774193, "percentage": 96.84, "elapsed_time": "0:33:45", "remaining_time": "0:01:06", "throughput": "621.30", "total_tokens": 1258448}
- {"current_steps": 185, "total_steps": 190, "loss": 0.0167, "learning_rate": 9.513254770636138e-09, "epoch": 4.774193548387097, "percentage": 97.37, "elapsed_time": "0:33:56", "remaining_time": "0:00:55", "throughput": "621.38", "total_tokens": 1265440}
- {"current_steps": 186, "total_steps": 190, "loss": 0.0002, "learning_rate": 6.089874350439507e-09, "epoch": 4.8, "percentage": 97.89, "elapsed_time": "0:34:07", "remaining_time": "0:00:44", "throughput": "621.28", "total_tokens": 1272080}
- {"current_steps": 187, "total_steps": 190, "loss": 0.0003, "learning_rate": 3.4261631135654174e-09, "epoch": 4.825806451612904, "percentage": 98.42, "elapsed_time": "0:34:18", "remaining_time": "0:00:33", "throughput": "621.31", "total_tokens": 1278976}
- {"current_steps": 188, "total_steps": 190, "loss": 0.0024, "learning_rate": 1.5229324522605949e-09, "epoch": 4.851612903225806, "percentage": 98.95, "elapsed_time": "0:34:29", "remaining_time": "0:00:22", "throughput": "621.29", "total_tokens": 1285760}
- {"current_steps": 189, "total_steps": 190, "loss": 0.0005, "learning_rate": 3.8076210902182607e-10, "epoch": 4.877419354838709, "percentage": 99.47, "elapsed_time": "0:34:40", "remaining_time": "0:00:11", "throughput": "621.19", "total_tokens": 1292368}
- {"current_steps": 190, "total_steps": 190, "loss": 0.0017, "learning_rate": 0.0, "epoch": 4.903225806451613, "percentage": 100.0, "elapsed_time": "0:34:51", "remaining_time": "0:00:00", "throughput": "621.15", "total_tokens": 1299120}
- {"current_steps": 190, "total_steps": 190, "epoch": 4.903225806451613, "percentage": 100.0, "elapsed_time": "0:35:42", "remaining_time": "0:00:00", "throughput": "606.30", "total_tokens": 1299120}
+ {"current_steps": 5, "total_steps": 79, "percentage": 6.33, "elapsed_time": "0:00:00", "remaining_time": "0:00:05"}
+ {"current_steps": 10, "total_steps": 79, "percentage": 12.66, "elapsed_time": "0:00:00", "remaining_time": "0:00:05"}
+ {"current_steps": 15, "total_steps": 79, "percentage": 18.99, "elapsed_time": "0:00:01", "remaining_time": "0:00:05"}
+ {"current_steps": 20, "total_steps": 79, "percentage": 25.32, "elapsed_time": "0:00:01", "remaining_time": "0:00:05"}
+ {"current_steps": 25, "total_steps": 79, "percentage": 31.65, "elapsed_time": "0:00:02", "remaining_time": "0:00:04"}
+ {"current_steps": 30, "total_steps": 79, "percentage": 37.97, "elapsed_time": "0:00:02", "remaining_time": "0:00:04"}
+ {"current_steps": 35, "total_steps": 79, "percentage": 44.3, "elapsed_time": "0:00:03", "remaining_time": "0:00:04"}
+ {"current_steps": 40, "total_steps": 79, "percentage": 50.63, "elapsed_time": "0:00:03", "remaining_time": "0:00:03"}
+ {"current_steps": 45, "total_steps": 79, "percentage": 56.96, "elapsed_time": "0:00:04", "remaining_time": "0:00:03"}
+ {"current_steps": 50, "total_steps": 79, "percentage": 63.29, "elapsed_time": "0:00:04", "remaining_time": "0:00:02"}
+ {"current_steps": 55, "total_steps": 79, "percentage": 69.62, "elapsed_time": "0:00:05", "remaining_time": "0:00:02"}
+ {"current_steps": 60, "total_steps": 79, "percentage": 75.95, "elapsed_time": "0:00:05", "remaining_time": "0:00:01"}
+ {"current_steps": 65, "total_steps": 79, "percentage": 82.28, "elapsed_time": "0:00:05", "remaining_time": "0:00:01"}
+ {"current_steps": 70, "total_steps": 79, "percentage": 88.61, "elapsed_time": "0:00:06", "remaining_time": "0:00:00"}
+ {"current_steps": 75, "total_steps": 79, "percentage": 94.94, "elapsed_time": "0:00:06", "remaining_time": "0:00:00"}
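For reference, both the replaced training entries and the new prediction-progress entries are plain JSONL, so sanity checks are easy. A minimal sketch, assuming the file name used in this commit; the 10-step warmup is inferred from the logged values (e.g. step 100 sits at exactly half of the 5.0e-06 peak), not read from any config. It re-derives the cosine learning-rate curve and checks that "throughput" is roughly total_tokens divided by elapsed seconds:

    import json, math

    def check(path="trainer_log.jsonl", peak=5e-6, warmup=10):
        for rec in map(json.loads, open(path)):
            t, T = rec["current_steps"], rec["total_steps"]
            # Skip summary/prediction rows (no "loss") and warmup steps.
            if "loss" not in rec or t < warmup:
                continue
            # Cosine decay from `peak` to 0 over the post-warmup steps.
            lr = peak * 0.5 * (1 + math.cos(math.pi * (t - warmup) / (T - warmup)))
            h, m, s = map(int, rec["elapsed_time"].split(":"))
            tput = rec["total_tokens"] / (3600 * h + 60 * m + s)  # tokens/sec
            print(t, rec["learning_rate"], f"{lr:.6e}", rec["throughput"], f"{tput:.2f}")

On the final full step above this reproduces learning_rate 0.0 and a throughput near 621 (1299120 tokens over 34 min 51 s); small deviations come from the elapsed time being rounded to whole seconds.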
training_args.yaml
CHANGED
@@ -1,30 +1,18 @@
- bf16: true
  cutoff_len: 1024
- dataset:
  dataset_dir: data
-
- deepspeed: cache/ds_z2_config.json
- do_train: true
  finetuning_type: full
  flash_attn: auto
-
- include_num_input_tokens_seen: true
- learning_rate: 5.0e-06
- logging_steps: 1
- lr_scheduler_type: cosine
- max_grad_norm: 1.0
  max_samples: 100000
- model_name_or_path:
-
-
-
- packing: false
- per_device_train_batch_size: 2
- plot_loss: true
  preprocessing_num_workers: 16
  quantization_method: bitsandbytes
- report_to: none
- save_steps: 1000
  stage: sft
  template: llama2
-

  cutoff_len: 1024
+ dataset: truth_dev_0716_2
  dataset_dir: data
+ do_predict: true
  finetuning_type: full
  flash_attn: auto
+ max_new_tokens: 512
  max_samples: 100000
+ model_name_or_path: saves/LLaMA2-7B-Chat/full/train_2024-07-16-16-48-49_llama2_2
+ output_dir: saves/LLaMA2-7B-Chat/full/eval_2024-07-16-17-27-37
+ per_device_eval_batch_size: 2
+ predict_with_generate: true
  preprocessing_num_workers: 16
  quantization_method: bitsandbytes
  stage: sft
+ temperature: 0.95
  template: llama2
+ top_p: 0.7
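The new file is the argument set LLaMA-Factory records for this predict run, and it can typically be replayed with the project's CLI (`llamafactory-cli train training_args.yaml`, which routes to prediction because of do_predict: true). The sampling keys added above map onto a transformers GenerationConfig roughly as in this sketch; the `do_sample=True` flag is an assumption on my part, since temperature and top_p only take effect when sampling is enabled:

    from transformers import GenerationConfig

    gen_cfg = GenerationConfig(
        do_sample=True,      # assumed; temperature/top_p are no-ops in greedy decoding
        temperature=0.95,    # temperature: 0.95 above
        top_p=0.7,           # top_p: 0.7 above
        max_new_tokens=512,  # max_new_tokens: 512 above
    )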