Training in progress, step 27
- config.json +26 -0
- run_dpo.sh +34 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +46 -0
- trainin.log +840 -0
- training_args.bin +3 -0
config.json
ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "/share/home/models/CHT-Mistral-7B/CHT-6/",
+  "architectures": [
+    "MistralForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.42.4",
+  "use_cache": false,
+  "vocab_size": 32000
+}
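
Editor's note: the config above is a stock Mistral-7B architecture (32 layers, grouped-query attention with 8 KV heads, 32k positions with a 4k sliding window), saved from a local CHT-Mistral-7B checkpoint with the KV cache disabled for gradient checkpointing. A minimal loading sketch, assuming only that the local path from the config exists on the training host:

from transformers import AutoConfig, AutoModelForCausalLM
import torch

model_path = "/share/home/models/CHT-Mistral-7B/CHT-6/"  # path from config.json

config = AutoConfig.from_pretrained(model_path)
assert config.model_type == "mistral"
assert config.num_key_value_heads == 8  # GQA: 32 query heads share 8 KV heads

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    use_cache=False,             # matches "use_cache": false; required with gradient checkpointing
)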
run_dpo.sh
ADDED
@@ -0,0 +1,34 @@
+# export NCCL_P2P_DISABLE="1"
+# export NCCL_IB_DISABLE="1"
+export OMP_NUM_THREADS=1
+export ACCELERATE_LOG_LEVEL=info
+
+
+export WANDB_PROJECT='Mistral-DPO'
+export WANDB_ENTITY="ckip"
+
+output_dir="DPO/CHT-6_20240715-1255"
+# Abort if the output directory already exists
+if [ -d ${output_dir} ]; then
+    echo "The directory already exists."
+    exit
+fi
+mkdir -p ${output_dir}
+current_script=$0
+cp ${current_script} ${output_dir}
+
+
+accelerate launch \
+    --config_file alignment-handbook/recipes/accelerate_configs/deepspeed_zero3.yaml \
+    --num_processes 4 \
+    test_dpo.py \
+    "dpo_config_full.yaml" \
+    --load_in_4bit=false \
+    --logging_steps=1 \
+    --model_name_or_path="/share/home/models/CHT-Mistral-7B/CHT-6/" \
+    --train_file="data/training_ptt_dpo-include_id-converted.jsonl" \
+    --validation_file="data/testing_ptt_dpo_include_id-converted.jsonl" \
+    --cache_dir="cache_dir" \
+    --output_dir=${output_dir} \
+    | tee ${output_dir}/trainin.log
+
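
Editor's note: test_dpo.py itself is not part of this commit, so the following is only a hypothetical sketch of the trainer setup it presumably performs, reconstructed from the flags above and the DPOConfig dump in trainin.log below. The explicit reference model is an assumption, suggested by the log's second "finished initializing model ... num_elems = 14.48B" line (policy plus reference copies of the 7.24B-parameter model):

# Hypothetical sketch only; hyperparameters copied from the DPOConfig dump in trainin.log.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_path = "/share/home/models/CHT-Mistral-7B/CHT-6/"
model = AutoModelForCausalLM.from_pretrained(model_path)
ref_model = AutoModelForCausalLM.from_pretrained(model_path)  # frozen DPO reference (assumed)
tokenizer = AutoTokenizer.from_pretrained(model_path)

dataset = load_dataset(
    "json",
    data_files={
        "train": "data/training_ptt_dpo-include_id-converted.jsonl",
        "test": "data/testing_ptt_dpo_include_id-converted.jsonl",
    },
)

args = DPOConfig(
    output_dir="DPO/CHT-6_20240715-1255",
    beta=0.01,                       # values below are from trainin.log
    max_length=1024,
    max_prompt_length=512,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=2,
    learning_rate=5e-7,
    num_train_epochs=1,
    bf16=True,
    gradient_checkpointing=True,
)

trainer = DPOTrainer(
    model,
    ref_model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
)
trainer.train()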
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
tokenizer_config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "add_prefix_space": null,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
+  "chat_template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% set messages = messages[1:] %}{% else %}{% set system_message = 'SYSTEM:你是由Mistral AI所開發、經台灣詞庫小組(CKIP)後續優化的語言模型,優化目標為提升台灣正體中文任務的表現與在地化,主要的使用者來自於台灣。\n\n' %}{% endif %}{{system_message}}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate system/user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ 'Human:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant:\n' + message['content'] + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:\n' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 4096,
+  "pad_token": "</s>",
+  "padding_side": "left",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "truncation_side": "left",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
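
Editor's note: the chat_template above defines the CKIP prompt format: BOS, then a default Traditional-Chinese system preamble (identifying the model as developed by Mistral AI and further tuned by CKIP for Taiwan), then strictly alternating "Human:" / "Assistant:" turns, with EOS closing each assistant reply. A small rendering sketch; the message content is illustrative, not from the dataset:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("/share/home/models/CHT-Mistral-7B/CHT-6/")

messages = [{"role": "user", "content": "請摘要以下討論……"}]  # illustrative only
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# prompt == "<s>" + default SYSTEM preamble + "Human:\n請摘要以下討論……\n\n" + "Assistant:\n"
# The template raises an exception if roles do not alternate user/assistant.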
trainin.log
ADDED
@@ -0,0 +1,840 @@
+[2024-07-15 08:06:36,677] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2024-07-15 08:08:02,878] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2024-07-15 08:08:02,878] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2024-07-15 08:08:02,882] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2024-07-15 08:08:02,914] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2024-07-15 08:08:35,144] [INFO] [comm.py:637:init_distributed] cdb=None
+[2024-07-15 08:08:35,144] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2024-07-15 08:08:35,149] [INFO] [comm.py:637:init_distributed] cdb=None
+[2024-07-15 08:08:35,153] [INFO] [comm.py:637:init_distributed] cdb=None
+[2024-07-15 08:08:35,157] [INFO] [comm.py:637:init_distributed] cdb=None
+2024-07-15 08:08:35 - INFO - __main__ - Model parameters ModelArguments(base_model_revision=None, model_name_or_path='/share/home/models/CHT-Mistral-7B/CHT-6/', model_revision='main', model_code_revision=None, torch_dtype=None, tokenizer_name_or_path=None, trust_remote_code=False, use_flash_attention_2=False, use_peft=False, lora_r=16, lora_alpha=32, lora_dropout=0.05, lora_target_modules=None, lora_modules_to_save=None, load_in_8bit=False, load_in_4bit=False, bnb_4bit_quant_type='nf4', use_bnb_nested_quant=False, bnb_4bit_quant_storage='uint8')
+2024-07-15 08:08:35 - INFO - __main__ - Data parameters DataArguments(chat_template=None, text_column='text', preprocessing_num_workers=12, truncation_side=None, auto_insert_empty_system_msg=True, train_file='data/training_ptt_dpo-include_id-converted.jsonl', validation_file='data/testing_ptt_dpo_include_id-converted.jsonl', cache_dir='cache_dir')
+2024-07-15 08:08:35 - INFO - __main__ - Training/evaluation parameters DPOConfig(
+_n_gpu=1,
+accelerator_config={'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None, 'use_configured_state': False},
+adafactor=False,
+adam_beta1=0.9,
+adam_beta2=0.999,
+adam_epsilon=1e-08,
+auto_find_batch_size=False,
+batch_eval_metrics=False,
+beta=0.01,
+bf16=True,
+bf16_full_eval=False,
+data_seed=None,
+dataloader_drop_last=False,
+dataloader_num_workers=0,
+dataloader_persistent_workers=False,
+dataloader_pin_memory=True,
+dataloader_prefetch_factor=None,
+dataset_num_proc=None,
+ddp_backend=None,
+ddp_broadcast_buffers=None,
+ddp_bucket_cap_mb=None,
+ddp_find_unused_parameters=None,
+ddp_timeout=1800,
+debug=[],
+deepspeed=None,
+disable_dropout=True,
+disable_tqdm=False,
+dispatch_batches=None,
+do_eval=True,
+do_predict=False,
+do_train=False,
+eval_accumulation_steps=None,
+eval_delay=0,
+eval_do_concat_batches=True,
+eval_on_start=False,
+eval_steps=100,
+eval_strategy=steps,
+evaluation_strategy=steps,
+f_alpha_divergence_coef=1.0,
+f_divergence_type=FDivergenceType.REVERSE_KL,
+force_use_ref_model=False,
+fp16=False,
+fp16_backend=auto,
+fp16_full_eval=False,
+fp16_opt_level=O1,
+fsdp=[],
+fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False},
+fsdp_min_num_params=0,
+fsdp_transformer_layer_cls_to_wrap=None,
+full_determinism=False,
+generate_during_eval=False,
+gradient_accumulation_steps=2,
+gradient_checkpointing=True,
+gradient_checkpointing_kwargs={'use_reentrant': False},
+greater_is_better=None,
+group_by_length=False,
+half_precision_backend=auto,
+hub_always_push=False,
+hub_model_id=None,
+hub_model_revision=main,
+hub_private_repo=False,
+hub_strategy=every_save,
+hub_token=<HUB_TOKEN>,
+ignore_data_skip=False,
+include_inputs_for_metrics=False,
+include_num_input_tokens_seen=False,
+include_tokens_per_second=False,
+is_encoder_decoder=None,
+jit_mode_eval=False,
+label_names=None,
+label_pad_token_id=-100,
+label_smoothing=0,
+label_smoothing_factor=0.0,
+learning_rate=5e-07,
+length_column_name=length,
+load_best_model_at_end=False,
+local_rank=0,
+log_level=info,
+log_level_replica=warning,
+log_on_each_node=True,
+logging_dir=results/DPO/output_dir/runs/Jul15_08-08-35_CKIP-LLM-VM2,
+logging_first_step=True,
+logging_nan_inf_filter=True,
+logging_steps=1.0,
+logging_strategy=steps,
+loss_type=sigmoid,
+lr_scheduler_kwargs={},
+lr_scheduler_type=cosine,
+max_grad_norm=1.0,
+max_length=1024,
+max_prompt_length=512,
+max_steps=-1,
+max_target_length=None,
+metric_for_best_model=None,
+model_adapter_name=None,
+model_init_kwargs=None,
+mp_parameters=,
+neftune_noise_alpha=None,
+no_cuda=False,
+num_train_epochs=1,
+optim=adamw_torch,
+optim_args=None,
+optim_target_modules=None,
+output_dir=DPO/CHT-6_20240715-1255,
+overwrite_output_dir=False,
+padding_value=None,
+past_index=-1,
+per_device_eval_batch_size=8,
+per_device_train_batch_size=8,
+precompute_ref_log_probs=False,
+prediction_loss_only=False,
+push_to_hub=True,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ray_scope=last,
+ref_adapter_name=None,
+ref_model_init_kwargs=None,
+ref_model_mixup_alpha=0.9,
+ref_model_sync_steps=64,
+reference_free=False,
+remove_unused_columns=False,
+report_to=['tensorboard', 'wandb'],
+restore_callback_states_from_checkpoint=False,
+resume_from_checkpoint=None,
+rpo_alpha=None,
+run_name=None,
+save_on_each_node=False,
+save_only_model=False,
+save_safetensors=True,
+save_steps=100,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+skip_memory_metrics=True,
+split_batches=None,
+sync_ref_model=False,
+tf32=None,
+torch_compile=False,
+torch_compile_backend=None,
+torch_compile_mode=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+truncation_mode=keep_end,
+use_cpu=False,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.1,
+warmup_steps=0,
+weight_decay=0.0,
+)
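
Editor's note (not part of the log): a quick consistency check on the numbers above. per_device_train_batch_size=8 × gradient_accumulation_steps=2 × 4 processes (run_dpo.sh passes --num_processes 4) gives an effective batch of 64, matching the train_batch_size=64 that DeepSpeed reports further down. With 1,728 training rows and num_train_epochs=1, that is 1728 / 64 = 27 optimizer steps, consistent with the commit title "Training in progress, step 27".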
+DatasetDict({
+    train: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 1728
+    })
+    test: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 91
+    })
+})DatasetDict({
+    train: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 1728
+    })
+    test: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 91
+    })
+})
+
+DatasetDict({
+    train: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 1728
+    })
+    test: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 91
+    })
+})
+DatasetDict({
+    train: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 1728
+    })
+    test: Dataset({
+        features: ['prompt', 'chosen', 'rejected', 'messages'],
+        num_rows: 91
+    })
+})
+2024-07-15 08:08:37 - INFO - __main__ - Decontaminated 0 (0.00%) samples from the training set.
+2024-07-15 08:08:37 - INFO - __main__ - Prompt sample 1309 of the raw training set:
+
+<s>Human:
+文章:輿情作者,日期,輿情內文
+fspt01,6/20,1.近期晚上4G行動網路常常中斷,從110/05/28開始,特別開始記錄4G網路品質。 我也不是沒有先跟中華電信客服人員反應遇到的障礙,但畢竟是跟固定設備投資 有關,客服人員、機房人員只能敷衍了事。 2.一直以來,很少關心公民議題,這次被搞到一肚子火,認真思考有那些機構,可以 讓我們老百姓反應中華電信壟斷性議題。 網路上看到這則, http://www.ftc.gov.tw/uploadDecision/dd3d922b-bc37-4911-8141-b42779405ab4.pdf 我是看不太懂,大概了解「公平交易委員會」處分「中華電信股份有限公司」新台幣 500萬元罰鍰。深深感到,這或許是一條路。 3.因為我不懂很多東西,希望各個專業領域版友能提供一些資訊。
+ebbf,06/20 23:53,有人壓著你只能選種花?
+bcd21086999,06/21 00:22,噓
+cv17,06/21 00:24,???
+obuibo2000,06/21 00:26,任何室內網路不良的,我只推固網.
+obuibo2000,06/21 00:26,要賭便宜有便宜的作法,地點好或是挑業者。
+ufoubjlbotp,06/21 00:36,人家罰固網廣告不實,與壟斷何干
+kbto4560,06/21 00:38,中華電信問題:使用者問題
+kbto4560,06/21 00:38,XXX電信問題:誰叫你要用XXX
+djuzqpsu,06/21 00:41,光世代的廣告跟4G訊號有啥關係???
+gvlbp21,06/21 01:02,呵...為何不攜碼出去?
+qbvm40807,06/21 01:10,真的爛要求無條件解約就好了 台灣有5間電信 比中國
+qbvm40807,06/21 01:10,還多欸 更不用說背後也都是獨立的大財團 說壟斷? 你
+qbvm40807,06/21 01:10,是認真的嗎???
+lojhiu326,06/21 01:32,扯到壟斷就太過了XD 當其他電信塑膠?
+lplpllp416,06/21 01:38,去申請別家試用看看,哪家情況好就跳哪家,你這種
+lplpllp416,06/21 01:38,情況下為何執著於同一家…
+sa759,06/21 01:50,壟...壟斷4G?
+TvohIzvo,06/21 02:04,推!權益是爭取來的!
+Bovunjbp,06/21 02:46,不是湊足關鍵字就能遮掩你是想公審中華電信的奧客
+zffpvp,06/21 03:02,中華4G真的爛 標5G亂喊價 把4G用戶拖下水
+bsjbeof,06/21 04:39,也許你家是其他業者黃金地段 快跳槽吧 這沒壟斷 XD
+bsjbeof,06/21 04:43,種花也許在你家地段受到其他抗議而放棄但不構成壟斷
+nbocpx77,06/21 06:26,你先翻開合約好好看一下 就不會想去浪費這個時間了
+Epsffo1010,06/21 06:55,.........
+ay27771531,06/21 07:20,所以你要解釋那裡壟斷了嗎
+Gn4o,06/21 07:28,……
+ex7931425,06/21 07:58,中華電信壟斷?
+h6i630,06/21 09:26,你不是只有中華可選,何來壟斷=.=?
+hbnfljoh,06/21 10:19,不懂的東西你可以查清楚了在上來發文 顆顆
+tpwb0809,06/21 11:17,測智商的發文
+SD8377,06/21 11:50,什麼啦
+NfohYjbo,06/21 11:56,中華4g哪有壟斷,原po是不是搞錯什麼了
+kvtujo332805,06/21 12:07,過了超過12小時沒回應
+kvtujo332805,06/21 12:07,…感覺又是一篇射後不理文了
+ol950357,06/21 12:09,???????????壟斷在那裡???
+tpoza700,06/21 12:19,?
+cfopbjdp,06/21 12:33,台灣鯛好吃
+bqqmfxbsn,06/21 14:19,呃呃呃,該從哪裡開始吐槽
+ibolthp2012,06/21 15:39,這叫公民議題?
+nfqbmjoh,06/21 16:12,請先理解甚麼是行動網路跟固定網路好嗎 別的先不說
+nfqbmjoh,06/21 16:12,拿光世代的資料比行動網路???????
+tijofMpwf,06/21 18:41,這是公民議題?
+dbnfmmbmb,06/22 03:05,…
+djnpof : 認真回,06/22 10:02,能歸責於種花的大概只有網路太多人使用以致
+djnpof,06/22 10:05,網路中斷 其他很難要歸責種花 (干擾或訊號不佳等)
+djnpof,06/22 10:08,曾有聽過有人因鄰居晚間使用電器而造成網路中斷的
+djnpof,06/22 10:09,像這種狀況的 是種花的問題嗎?
+bmfwfmnbo,06/22 11:25,疫情開始後,網路確實慢很多,共體時艱吧
+===
+問題:請對以上討論進行所有意見的分類與摘要,類別有六種: 建議、質疑、疑問、抱怨、讚美、提供資訊。意見摘要用條列的方式呈現,每條意見之後用括號包含持此意見的發言者,原本文章作者的意見也必須一併分析,同一條意見可能包含不只一人。
+
+
+2024-07-15 08:08:37 - INFO - __main__ - Chosen sample 1309 of the raw training set:
+
+Assistant:
+建議:
+- 建議可以換其他電信業者,並試試其他業者的服務品質如何 (ebbf,obuibo2000,lplpllp416,bsjbeof)
+- 建議對方先翻開並理解合約內容,便不會浪費時間 (nbocpx77)
+- 固定網路比較不會出現問題,不應該使用行動網路來取代 (obuibo2000)
+- 如果真的覺得爛到無法忍受,可以請求無條件解約 (qbvm40807)
+
+質疑:
+- 中華電信的問題被視為用戶問題,其他電信的問題則是因為選擇該電信業者 (kbto4560)
+- 中華電信4G和5G的問題和光世代的廣告無關 (djuzqpsu)
+- 對於原po以壟斷來批評中華電信的用詞有疑問 (ufoubjlbotp, qbvm40807, lojhiu326, sa759, ex7931425,h6i630,NfohYjbo)
+- 懷疑原po是個奧客,想要公審中華電信 (Bovunjbp)
+- 懷疑原po沒有好好理解公平交易法和壟斷的意義 (hbnfljoh)
+- 懷疑原po發完文後就沒有回應,可能是在故意搞事 (kvtujo332805)
+
+疑問:
+- 原po所說的壟斷是指什麼?為何認為中華電信壟斷了行動網路市場?(ay27771531, lojhiu326,NfohYjbo,ol950357,h6i630)
+- 不理解原po為什麼堅持使用同一家信號不好的電信業者而不換家試試 (lplpllp416,Gn4o)
+
+抱怨:
+- 抱怨中華電信4G的不穩定,標5G價格卻又拖累4G用戶 (fspt01, zffpvp)
+
+讚美:
+- 原po對公民議題的態度值得肯定。 (TvohIzvo)
+
+提供資訊:
+- 台灣有五家電信業者,背後都有大財團支援,沒有壟斷的問題 (qbvm40807)
+- 台灣的網路使用量在疫情期間增加,速度變慢是共體時艱 (bmfwfmnbo)
+- 網路使用時段、設備種類、位置等因素都可能影響網路品質,並非完全由電信業者控制 (djnpof)</s>
+2024-07-15 08:08:37 - INFO - __main__ - Rejected sample 1309 of the raw training set:
+
+Assistant:
+建議:
+1. 應該讓老百姓反應中華電信壟斷性議題 (fspt01)
+2. 應該攜碼出去 (gvlbp21)
+3. 應該去申請別家試用看看,哪家情況好就跳哪家 (lplpllp416)
+4. 應該先翻開合約好好看一下,就不會想去浪費這個時間了 (nbocpx77)
+
+質疑:
+1. 質疑中華電信是否真的壟斷4G (sa759, a7931425, h6i630, hbnfljoh, ol950357, tpoza700, cfopbjdp, bqqmfxbsn, ibolthp2012, nfqbmjoh, tijofMpwf, dbnfmmbmb)
+2. 質疑種花是否構成壟斷 (bsjbeof)
+
+疑問:
+1. 疑問公民議題是什麼 (fspt01, ibolthp2012, nfqbmjoh, tijofMpwf)
+2. 疑問壟斷在哪裡 (ay27771531, Gn4o, ex7931425, ol950357, tpoza700, cfopbjdp, bqqmfxbsn, ibolthp2012, nfqbmjoh, tijofMpwf, dbnfmmbmb)
+
+抱怨:
+1. 抱怨4G網路品質不好 (fspt01)
+2. 抱怨中華電信壟斷 (fspt01, kbto4560, zffpvp, bsjbeof)
+3. 抱怨網路中斷 (nfqbmjoh)
+
+讚美:
+1. 讚美固網 (obuibo2000)
+2. 讚美公平交易委員會處分中華電信的行為 (fspt01)
+
+提供資訊:
+1. 提供了公平交易委員會處分中華電信的資訊 (fspt01)
+2. 提供了中華電信被罰款的資訊 (ebbf, cv17, obuibo2000, ufoubjlbotp, kbto4560, djuzqpsu, gvlbp21, qbvm40807, lojhiu326, lplpllp416, sa759, TvohIzvo, Bovunjbp, zffpvp, bsjbeof, nbocpx77, Epsffo1010, ay27771531, Gn4o, ex7931425, h6i630, hbnfljoh, ol950357, tpoza700, cfopbjdp, bqqmfxbsn, ibolthp2012, nfqbmjoh, tijofMpwf, dbnfmmbmb)</s>
+2024-07-15 08:08:37 - INFO - __main__ - Prompt sample 228 of the raw training set:
+
+<s>Human:
+文章:輿情作者,日期,輿情內文
+bcdtbfhvtb11,6/25,照上面說的1199以下等於是5G的量到降速吃到飽,1399開始才是真的5G吃到飽 現在4G吃到飽我家靠窗速度還有100M上下,晚上有時速度會慢一點不到10M 今天看到5G方案讓我猶豫要不要升級5G了 有2個問題 1.50G的額度用USB分享給電腦也算嗎?還是開Wifi熱點才是? 2.如果用1399是熱點超過50G連本身吃到飽也降速到10M?還是只有被分享的裝置會降速到10M?
+qpqpqbm,06/25 11:22,熱點分享量到是鎖卡 只剩手機能用吧 目前看起來是
+qpqpqbm,06/25 11:22,這樣玩 不夠這也是中華董事長有說目前涵蓋不過才有
+qpqpqbm,06/25 11:22,限制 未來就不知道了
+zjfstbo,06/25 11:31,限制一堆價錢這樣 以為消費者會買單嗎 天真
+qpqpqbm,06/25 11:32,市場跟客戶需求有關 客戶有需求就要滿足XD
+qpqpqbm,06/25 11:33,https://reurl.cc/L3zKMe 高資費1399以上用的爽就好
+qpqpqbm,06/25 11:34,不夠用早該辦固網滿足辦公家庭需求...
+uudnm,06/25 12:07,我也好奇,熱點流量用完,手機的吃到飽應該不會限速
+uudnm,06/25 12:07,吧
+qpqpqbm,06/25 12:11,手機沒影響 就是要擋大戶 分享流量夠一般基本用途
+olit9412235,06/25 14:51,為什麼拿台北市跟東京比,因為都是大都市,比較對
+olit9412235,06/25 14:51,象是 docomo 跟中華電信,也是兩邊電信龍頭,這是
+olit9412235,06/25 14:51,親身體驗。這還是手機而已,如果是租分享器速度會
+olit9412235,06/25 14:51,更快。
+lbnjs,06/25 15:18,額度是全部,包含使用SIM卡網路的所有裝置。
+gmjfstlz,06/25 21:14,容量哪有在分你開分享與否?有網路都是用5g跑阿
+===
+問題:請對以上討論進行所有意見的分類與摘要,類別有六種: 建議、質疑、疑問、抱怨、讚美、提供資訊。意見摘要用條列的方式呈現,每條意見之後用括號包含持此意見的發言者,原本文章作者的意見也必須一併分析,同一條意見可能包含不只一人。
+
+
+2024-07-15 08:08:37 - INFO - __main__ - Chosen sample 228 of the raw training set:
+
+Assistant:
+建議:
+1. 可以考慮升級5G(bcdtbfhvtb11)
+2. 需求不夠建議可以辦固網滿足辦公家庭需求(qpqpqbm)
+
+質疑:
+1. 對於新的5G方案有所疑慮,質疑是否值得升級(bcdtbfhvtb11)
+2. 質疑行動網路公司將價格定得過高,以及設置太多限制(zjfstbo)
+
+疑問:
+1. 50G的額度是否包含用USB分享給電腦的數據(bcdtbfhvtb11)
+2. 如果用1399是熱點超過50G連本身吃到飽也降速到10M(bcdtbfhvtb11)
+3. 對於熱點流量用完後,手機的吃到飽是否會限速(uudnm)
+
+抱怨:
+1. 對於5G資費計劃有許多限制,價格又偏高感到不滿(zjfstbo)
+
+讚美:
+1. 沒有讚美的意見
+
+提供資訊:
+1. 分享數據到其他裝置也會計算在內(lbnjs, gmjfstlz)
+2. 若熱點流量用完,手機所使用的數據還是5G數據並不受限制(uudnm, qpqpqbm)
+3. 對於市場需求關係的說明(qpqpqbm)
+4. 台北市與東京大都市的比較(olit9412235)
+5. 提供相關的網路連結(qpqpqbm)</s>
+2024-07-15 08:08:37 - INFO - __main__ - Rejected sample 228 of the raw training set:
+
+Assistant:
+建議:
+1. 高資費1399以上用的爽就好 (qpqpqbm)
+2. 不夠用早該辦固網滿足辦公家庭需求 (qpqpqbm)
+
+質疑:
+1. 限制一堆價錢這樣 以為消費者會買單嗎 天真 (zjfstbo)
+
+疑問:
+1. 1.50G的額度用USB分享給電腦也算嗎?還是開Wifi熱點才是? (bcdtbfhvtb11)
+2. 如果用1399是熱點超過50G連本身吃到飽也降速到10M?還是只有被分享的裝置會降速到10M? (bcdtbfhvtb11)
+
+抱怨:
+1. 市場跟客戶需求有關 客戶有需求就要滿足XD (qpqpqbm)
+
+讚美:
+無
+
+提供資訊:
+1. 熱點分享量到是鎖卡 只剩手機能用吧 目前看起來是這樣玩 不夠這也是中華董事長有說目前涵蓋不過才有限制 未來就不知道了 (qpqpqbm)
+2. 手機沒影響 就是要擋大戶 分享流量夠一般基本用途 (qpqpqbm)
+3. 額度是全部,包含使用SIM卡網路的所有裝置 (lbnjs)
+4. 容量哪有在分你開分享與否?有網路都是用5g跑阿 (gmjfstlz)
+5. 為什麼拿台北市跟東京比,因為都是大都市,比較對象是 docomo 跟中華電信,也是兩邊電信龍頭,這是親身體驗 (olit9412235)</s>
+2024-07-15 08:08:37 - INFO - __main__ - Prompt sample 51 of the raw training set:
+
+<s>Human:
+文章:輿情作者,日期,輿情內文
+epodmjboh,4/22,大家好!! 小弟這次剛好符合中華的公教雙飽資格, 想衝一波, 但是只想辦個299的就好, 因為平常都是用預付卡買1G, 不過有看到299裡面, 有個優惠是群內優惠 免費, 想問這個 是要需要多加50加電話號碼進群組嗎? 還是就是299就可以直接設定電話進群組 對這個有點不懂,想問有無人可以解答 謝謝大大們
+kbnft732,04/22 18:29,你要不要考慮直接問客服?
+qvtiqvmm,04/22 18:34,好像不用加50。 剛好今天辦299有問。
+yv3,04/22 18:44,台哥的群內免費試 每個單位有一個編號 例如台北市政
+yv3,04/22 18:45,府教育局 編號是1234 那教育局的員工辦公教專案
+===
+問題:請對以上討論進行所有意見的分類與摘要,類別有六種: 建議、質疑、疑問、抱怨、讚美、提供資訊。意見摘要用條列的方式呈現,每條意見之後用括號包含持此意見的發言者,原本文章作者的意見也必須一併分析,同一條意見可能包含不只一人。
+
+
+2024-07-15 08:08:37 - INFO - __main__ - Chosen sample 51 of the raw training set:
+
+Assistant:
+- 疑問:想要了解299元的方案是不是可以直接加入群組,還是要多加50元,並希望有人能提供資訊 (epodmjboh)
+- 建議:直接詢問客服以獲得最確實資訊 (kbnft732)
+- 提供資訊:今天辦理299元專案,表示不需要加50元 (qvtiqvmm)
+- 提供資訊:解釋群內免費的方式,每個單位組會有一個編號 (yv3)</s>
+2024-07-15 08:08:37 - INFO - __main__ - Rejected sample 51 of the raw training set:
+
+Assistant:
+- 疑問:
+  - 想問這個 是要需要多加50加電話號碼進群組嗎? 還是就是299就可以直接設定電話進群組 (epodmjboh)
+  - 想問有無人可以解答 (epodmjboh)
+
+- 建議:
+  - 你要不要考慮直接問客服? (kbnft732)
+
+- 提供資訊:
+  - 好像不用加50。 剛好今天辦299有問。 (qvtiqvmm)
+  - 台哥的群內免費試 每個單位有一個編號 例如台北市政府教育局 編號是1234 那教育局的員工辦公教專案 (yv3)</s>
+[INFO 20240712-1747] init DPOTrainer
+[INFO 20240712-1747] init DPOTrainer[INFO 20240712-1747] init DPOTrainer
+[INFO 20240712-1747] init DPOTrainer
+
+[2024-07-15 08:08:45,260] [INFO] [partition_parameters.py:347:__exit__] finished initializing model - num_params = 291, num_elems = 7.24B
+[2024-07-15 08:11:54,287] [INFO] [partition_parameters.py:347:__exit__] finished initializing model - num_params = 582, num_elems = 14.48B
+2024-07-15 08:11:58 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7f79c52c4340>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+2024-07-15 08:12:07 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7f9341050370>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+2024-07-15 08:12:07 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7efed65856c0>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+2024-07-15 08:12:07 - WARNING - datasets.fingerprint - Parameter 'function'=<bound method DPOTrainer.tokenize_row of <trl.trainer.dpo_trainer.DPOTrainer object at 0x7f62a1e81bd0>> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.
+[2024-07-15 08:12:09,915] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.2, git-hash=unknown, git-branch=unknown
+[2024-07-15 08:12:09,928] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
+[2024-07-15 08:12:09,929] [INFO] [logging.py:96:log_dist] [Rank 0] Creating ZeRO Offload
+[2024-07-15 08:12:10,036] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [begin]
+[2024-07-15 08:12:10,037] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 7.42 GB CA 7.89 GB Max_CA 8 GB
+[2024-07-15 08:12:10,037] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 17.32 GB, percent = 2.0%
+Parameter Offload: Total persistent parameters: 266240 in 65 params
+[2024-07-15 08:12:10,132] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [end]
+[2024-07-15 08:12:10,132] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 7.89 GB Max_CA 8 GB
+[2024-07-15 08:12:10,133] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 17.34 GB, percent = 2.0%
+[2024-07-15 08:12:10,133] [INFO] [config.py:972:print] DeepSpeedEngine configuration:
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] activation_checkpointing_config {
+    "partition_activations": false,
+    "contiguous_memory_optimization": false,
+    "cpu_checkpointing": false,
+    "number_checkpoints": null,
+    "synchronize_checkpoint_boundary": false,
+    "profile": false
+}
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] amp_enabled .................. False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] amp_params ................... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] autotuning_config ............ {
+    "enabled": false,
+    "start_step": null,
+    "end_step": null,
+    "metric_path": null,
+    "arg_mappings": null,
+    "metric": "throughput",
+    "model_info": null,
+    "results_dir": "autotuning_results",
+    "exps_dir": "autotuning_exps",
+    "overwrite": true,
+    "fast": true,
+    "start_profile_step": 3,
+    "end_profile_step": 5,
+    "tuner_type": "gridsearch",
+    "tuner_early_stopping": 5,
+    "tuner_num_trials": 50,
+    "model_info_path": null,
+    "mp_size": 1,
+    "max_train_batch_size": null,
+    "min_train_batch_size": 1,
+    "max_train_micro_batch_size_per_gpu": 1.024000e+03,
+    "min_train_micro_batch_size_per_gpu": 1,
+    "num_tuning_micro_batch_sizes": 3
+}
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] bfloat16_enabled ............. True
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] checkpoint_parallel_write_pipeline False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] checkpoint_tag_validation_enabled True
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] checkpoint_tag_validation_fail False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7f79c5f67a00>
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] communication_data_type ...... None
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] curriculum_enabled_legacy .... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] curriculum_params_legacy ..... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] data_efficiency_enabled ...... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] dataloader_drop_last ......... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] disable_allgather ............ False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] dump_state ................... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] dynamic_loss_scale_args ...... None
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_enabled ........... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_gas_boundary_resolution 1
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_layer_name ........ bert.encoder.layer
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_layer_num ......... 0
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_max_iter .......... 100
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_stability ......... 1e-06
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_tol ............... 0.01
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] eigenvalue_verbose ........... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] elasticity_enabled ........... False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] flops_profiler_config ........ {
+    "enabled": false,
+    "recompute_fwd_factor": 0.0,
+    "profile_step": 1,
+    "module_depth": -1,
+    "top_modules": 1,
+    "detailed": true,
+    "output_file": null
+}
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] fp16_auto_cast ............... None
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] fp16_enabled ................. False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] fp16_master_weights_and_gradients False
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] global_rank .................. 0
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] grad_accum_dtype ............. None
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] gradient_accumulation_steps .. 2
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] gradient_clipping ............ 1.0
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] gradient_predivide_factor .... 1.0
+[2024-07-15 08:12:10,134] [INFO] [config.py:976:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] initial_dynamic_scale ........ 1
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] load_universal_checkpoint .... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] loss_scale ................... 1.0
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] memory_breakdown ............. False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] mics_hierarchial_params_gather False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] mics_shard_size .............. -1
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] nebula_config ................ {
+    "enabled": false,
+    "persistent_storage_path": null,
+    "persistent_time_interval": 100,
+    "num_of_version_in_retention": 2,
+    "enable_nebula_load": true,
+    "load_path": null
+}
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] optimizer_legacy_fusion ...... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] optimizer_name ............... None
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] optimizer_params ............. None
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0}
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] pld_enabled .................. False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] pld_params ................... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] prescale_gradients ........... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] scheduler_name ............... None
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] scheduler_params ............. None
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] seq_parallel_communication_data_type torch.float32
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] sparse_attention ............. None
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] sparse_gradients_enabled ..... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] steps_per_print .............. inf
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] train_batch_size ............. 64
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] train_micro_batch_size_per_gpu 8
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] use_node_local_storage ....... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] wall_clock_breakdown ......... False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] weight_quantization_config ... None
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] world_size ................... 4
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] zero_allow_untested_optimizer False
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] zero_config .................. stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500,000,000 allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=DeepSpeedZeroOffloadParamConfig(device='none', nvme_path=None, buffer_count=5, buffer_size=100,000,000, max_in_cpu=1,000,000,000, pin_memory=False) offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='none', nvme_path=None, buffer_count=4, pin_memory=False, pipeline=False, pipeline_read=False, pipeline_write=False, fast_init=False) sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=True stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] zero_enabled ................. True
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] zero_force_ds_cpu_optimizer .. True
+[2024-07-15 08:12:10,135] [INFO] [config.py:976:print] zero_optimization_stage ...... 3
+[2024-07-15 08:12:10,135] [INFO] [config.py:962:print_user_config] json = {
+    "train_batch_size": 64,
+    "train_micro_batch_size_per_gpu": 8,
+    "gradient_accumulation_steps": 2,
+    "zero_optimization": {
+        "stage": 3,
+        "offload_optimizer": {
+            "device": "none",
+            "nvme_path": null
+        },
+        "offload_param": {
+            "device": "none",
+            "nvme_path": null
+        },
+        "stage3_gather_16bit_weights_on_model_save": true
+    },
+    "gradient_clipping": 1.0,
+    "steps_per_print": inf,
+    "bf16": {
+        "enabled": true
+    },
+    "fp16": {
+        "enabled": false
+    },
+    "zero_optimization.reduce_bucket_size": 1.677722e+07,
+    "zero_optimization.stage3_param_persistence_threshold": 4.096000e+04,
+    "zero_optimization.stage3_prefetch_bucket_size": 1.509949e+07
+}
+[INFO 20240712-1747] ☆☆☆
+[DEBUG 20240712-1804] checkpoint = None
+[INFO 20240712-1747] ☆☆☆
+[DEBUG 20240712-1804] checkpoint = None
+[INFO 20240712-1747] ☆☆☆
+[DEBUG 20240712-1804] checkpoint = None
+[INFO 20240712-1747] ☆☆☆
+[DEBUG 20240712-1804] checkpoint = None
+[2024-07-15 08:12:35,789] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.2, git-hash=unknown, git-branch=unknown
+[2024-07-15 08:12:35,796] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
+[2024-07-15 08:12:35,797] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer
+[2024-07-15 08:12:35,797] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer
+[2024-07-15 08:12:35,808] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW
+[2024-07-15 08:12:35,808] [INFO] [utils.py:56:is_zero_supported_optimizer] Checking ZeRO support for optimizer=AdamW type=<class 'torch.optim.adamw.AdamW'>
+[2024-07-15 08:12:35,808] [INFO] [logging.py:96:log_dist] [Rank 0] Creating fp16 ZeRO stage 3 optimizer, MiCS is enabled False, Hierarchical params gather False
+[2024-07-15 08:12:35,808] [INFO] [logging.py:96:log_dist] [Rank 0] Creating torch.bfloat16 ZeRO stage 3 optimizer
+[2024-07-15 08:12:35,927] [INFO] [utils.py:802:see_memory_usage] Stage 3 initialize beginning
+[2024-07-15 08:12:35,927] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 8 GB
+[2024-07-15 08:12:35,928] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.62 GB, percent = 2.1%
+[2024-07-15 08:12:35,929] [INFO] [stage3.py:126:__init__] Reduce bucket size 500,000,000
+[2024-07-15 08:12:35,929] [INFO] [stage3.py:127:__init__] Prefetch bucket size 50,000,000
+[2024-07-15 08:12:36,027] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [begin]
+[2024-07-15 08:12:36,028] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 7 GB
+[2024-07-15 08:12:36,028] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.62 GB, percent = 2.1%
+Parameter Offload: Total persistent parameters: 266240 in 65 params
+[2024-07-15 08:12:36,149] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [end]
+[2024-07-15 08:12:36,150] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 7 GB
+[2024-07-15 08:12:36,150] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.62 GB, percent = 2.1%
+[2024-07-15 08:12:36,268] [INFO] [utils.py:802:see_memory_usage] Before creating fp16 partitions
+[2024-07-15 08:12:36,269] [INFO] [utils.py:803:see_memory_usage] MA 6.74 GB Max_MA 6.74 GB CA 6.94 GB Max_CA 7 GB
+[2024-07-15 08:12:36,269] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.62 GB, percent = 2.1%
+[2024-07-15 08:12:37,788] [INFO] [utils.py:802:see_memory_usage] After creating fp16 partitions: 2
+[2024-07-15 08:12:37,789] [INFO] [utils.py:803:see_memory_usage] MA 6.75 GB Max_MA 6.75 GB CA 7.28 GB Max_CA 7 GB
+[2024-07-15 08:12:37,789] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.64 GB, percent = 2.2%
+[2024-07-15 08:12:37,890] [INFO] [utils.py:802:see_memory_usage] Before creating fp32 partitions
+[2024-07-15 08:12:37,890] [INFO] [utils.py:803:see_memory_usage] MA 6.75 GB Max_MA 6.75 GB CA 7.28 GB Max_CA 7 GB
+[2024-07-15 08:12:37,891] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.64 GB, percent = 2.2%
+[2024-07-15 08:12:37,998] [INFO] [utils.py:802:see_memory_usage] After creating fp32 partitions
+[2024-07-15 08:12:37,999] [INFO] [utils.py:803:see_memory_usage] MA 13.49 GB Max_MA 14.97 GB CA 15.92 GB Max_CA 16 GB
+[2024-07-15 08:12:37,999] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.64 GB, percent = 2.2%
+[2024-07-15 08:12:38,100] [INFO] [utils.py:802:see_memory_usage] Before initializing optimizer states
+[2024-07-15 08:12:38,101] [INFO] [utils.py:803:see_memory_usage] MA 13.49 GB Max_MA 13.49 GB CA 15.92 GB Max_CA 16 GB
+[2024-07-15 08:12:38,101] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.64 GB, percent = 2.2%
+[2024-07-15 08:12:38,267] [INFO] [utils.py:802:see_memory_usage] After initializing optimizer states
+[2024-07-15 08:12:38,267] [INFO] [utils.py:803:see_memory_usage] MA 26.98 GB Max_MA 33.73 GB CA 36.97 GB Max_CA 37 GB
+[2024-07-15 08:12:38,268] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.64 GB, percent = 2.2%
+[2024-07-15 08:12:38,268] [INFO] [stage3.py:460:_setup_for_real_optimizer] optimizer state initialized
+[2024-07-15 08:12:38,576] [INFO] [utils.py:802:see_memory_usage] After initializing ZeRO optimizer
+[2024-07-15 08:12:38,576] [INFO] [utils.py:803:see_memory_usage] MA 31.29 GB Max_MA 31.77 GB CA 51.01 GB Max_CA 51 GB
+[2024-07-15 08:12:38,577] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 18.64 GB, percent = 2.2%
+[2024-07-15 08:12:38,577] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = AdamW
+[2024-07-15 08:12:38,577] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler
+[2024-07-15 08:12:38,577] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = None
+[2024-07-15 08:12:38,577] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0], mom=[(0.9, 0.999)]
+[2024-07-15 08:12:38,578] [INFO] [config.py:972:print] DeepSpeedEngine configuration:
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] activation_checkpointing_config {
+    "partition_activations": false,
+    "contiguous_memory_optimization": false,
+    "cpu_checkpointing": false,
+    "number_checkpoints": null,
+    "synchronize_checkpoint_boundary": false,
+    "profile": false
+}
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] amp_enabled .................. False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] amp_params ................... False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] autotuning_config ............ {
+    "enabled": false,
+    "start_step": null,
+    "end_step": null,
+    "metric_path": null,
+    "arg_mappings": null,
+    "metric": "throughput",
+    "model_info": null,
+    "results_dir": "autotuning_results",
+    "exps_dir": "autotuning_exps",
+    "overwrite": true,
+    "fast": true,
+    "start_profile_step": 3,
+    "end_profile_step": 5,
+    "tuner_type": "gridsearch",
+    "tuner_early_stopping": 5,
+    "tuner_num_trials": 50,
+    "model_info_path": null,
+    "mp_size": 1,
+    "max_train_batch_size": null,
+    "min_train_batch_size": 1,
+    "max_train_micro_batch_size_per_gpu": 1.024000e+03,
+    "min_train_micro_batch_size_per_gpu": 1,
+    "num_tuning_micro_batch_sizes": 3
+}
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] bfloat16_enabled ............. True
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] checkpoint_parallel_write_pipeline False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] checkpoint_tag_validation_enabled True
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] checkpoint_tag_validation_fail False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7f7990a07520>
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] communication_data_type ...... None
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] curriculum_enabled_legacy .... False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] curriculum_params_legacy ..... False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] data_efficiency_enabled ...... False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] dataloader_drop_last ......... False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] disable_allgather ............ False
+[2024-07-15 08:12:38,578] [INFO] [config.py:976:print] dump_state ................... False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] dynamic_loss_scale_args ...... None
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_enabled ........... False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_gas_boundary_resolution 1
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_layer_name ........ bert.encoder.layer
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_layer_num ......... 0
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_max_iter .......... 100
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_stability ......... 1e-06
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_tol ............... 0.01
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] eigenvalue_verbose ........... False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] elasticity_enabled ........... False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] flops_profiler_config ........ {
+    "enabled": false,
+    "recompute_fwd_factor": 0.0,
+    "profile_step": 1,
+    "module_depth": -1,
+    "top_modules": 1,
+    "detailed": true,
+    "output_file": null
+}
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] fp16_auto_cast ............... None
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] fp16_enabled ................. False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] fp16_master_weights_and_gradients False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] global_rank .................. 0
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] grad_accum_dtype ............. None
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] gradient_accumulation_steps .. 2
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] gradient_clipping ............ 1.0
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] gradient_predivide_factor .... 1.0
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] initial_dynamic_scale ........ 1
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] load_universal_checkpoint .... False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] loss_scale ................... 1.0
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] memory_breakdown ............. False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] mics_hierarchial_params_gather False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] mics_shard_size .............. -1
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] nebula_config ................ {
+    "enabled": false,
+    "persistent_storage_path": null,
+    "persistent_time_interval": 100,
+    "num_of_version_in_retention": 2,
+    "enable_nebula_load": true,
+    "load_path": null
+}
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] optimizer_legacy_fusion ...... False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] optimizer_name ............... None
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] optimizer_params ............. None
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0}
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] pld_enabled .................. False
+[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] pld_params ................... False
|
757 |
+
[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] prescale_gradients ........... False
|
758 |
+
[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] scheduler_name ............... None
|
759 |
+
[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] scheduler_params ............. None
|
760 |
+
[2024-07-15 08:12:38,579] [INFO] [config.py:976:print] seq_parallel_communication_data_type torch.float32
|
761 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] sparse_attention ............. None
|
762 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] sparse_gradients_enabled ..... False
|
763 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] steps_per_print .............. inf
|
764 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] train_batch_size ............. 64
|
765 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] train_micro_batch_size_per_gpu 8
|
766 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] use_node_local_storage ....... False
|
767 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] wall_clock_breakdown ......... False
|
768 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] weight_quantization_config ... None
|
769 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] world_size ................... 4
|
770 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] zero_allow_untested_optimizer True
|
771 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] zero_config .................. stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500,000,000 allgather_partitions=True allgather_bucket_size=500,000,000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=DeepSpeedZeroOffloadParamConfig(device='none', nvme_path=None, buffer_count=5, buffer_size=100,000,000, max_in_cpu=1,000,000,000, pin_memory=False) offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='none', nvme_path=None, buffer_count=4, pin_memory=False, pipeline=False, pipeline_read=False, pipeline_write=False, fast_init=False) sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=True stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True
|
772 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] zero_enabled ................. True
|
773 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] zero_force_ds_cpu_optimizer .. True
|
774 |
+
[2024-07-15 08:12:38,580] [INFO] [config.py:976:print] zero_optimization_stage ...... 3
|
775 |
+
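Worth spelling out from the dump above: DeepSpeed enforces the invariant train_batch_size = train_micro_batch_size_per_gpu × gradient_accumulation_steps × world_size, and the printed values satisfy it (8 × 2 × 4 = 64, with world_size 4 matching the --num_processes 4 in run_dpo.sh). A minimal sketch of the check (variable names are mine, not from the log):

    micro_batch_per_gpu = 8  # train_micro_batch_size_per_gpu
    grad_accum_steps = 2     # gradient_accumulation_steps
    world_size = 4           # GPUs launched via --num_processes 4
    # DeepSpeed raises a config error if this product disagrees with train_batch_size
    assert micro_batch_per_gpu * grad_accum_steps * world_size == 64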
+ [2024-07-15 08:12:38,580] [INFO] [config.py:962:print_user_config] json = {
+     "train_batch_size": 64,
+     "train_micro_batch_size_per_gpu": 8,
+     "gradient_accumulation_steps": 2,
+     "zero_optimization": {
+         "stage": 3,
+         "offload_optimizer": {
+             "device": "none",
+             "nvme_path": null
+         },
+         "offload_param": {
+             "device": "none",
+             "nvme_path": null
+         },
+         "stage3_gather_16bit_weights_on_model_save": true
+     },
+     "gradient_clipping": 1.0,
+     "steps_per_print": inf,
+     "bf16": {
+         "enabled": true
+     },
+     "fp16": {
+         "enabled": false
+     },
+     "zero_allow_untested_optimizer": true
+ }
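One caveat about the `json = {...}` block above: it is DeepSpeed's pretty-print of the runtime config, not strictly valid JSON, because steps_per_print is rendered as a bare `inf`. Python's json module only accepts the spelling `Infinity`, so round-tripping this dump verbatim would fail. A small demonstration (standard library only; the strings are illustrative, not from the log):

    import json

    json.loads('{"steps_per_print": Infinity}')  # OK: parsed as float("inf")
    try:
        json.loads('{"steps_per_print": inf}')   # the spelling in the dump above
    except json.JSONDecodeError as e:
        print("not valid JSON:", e)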
+ [2024-07-15 08:13:10,011] [WARNING] [stage3.py:1949:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
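The warning above fires when the CUDA caching allocator has to flush mid-step under memory pressure; its own suggested mitigation is a synchronized cache flush. A minimal sketch of that call (the API is DeepSpeed's; where exactly it would sit in TRL's DPO training loop is my assumption, since the loop here lives inside DPOTrainer rather than user code):

    from deepspeed.accelerator import get_accelerator

    # call at the same point in the loop on every rank, e.g. right after the
    # optimizer step, so all ranks flush their allocator caches together
    get_accelerator().empty_cache()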
+ {'loss': 0.6931, 'grad_norm': 21.960294652093665, 'learning_rate': 1.6666666666666665e-07, 'rewards/chosen': 0.0, 'rewards/rejected': 0.0, 'rewards/accuracies': 0.0, 'rewards/margins': 0.0, 'logps/rejected': -430.01824951171875, 'logps/chosen': -522.2332763671875, 'logits/rejected': -1.5321521759033203, 'logits/chosen': -1.5251624584197998, 'epoch': 0.04}
+ {'loss': 0.6931, 'grad_norm': 20.29259471509865, 'learning_rate': 3.333333333333333e-07, 'rewards/chosen': 0.0, 'rewards/rejected': 0.0, 'rewards/accuracies': 0.0, 'rewards/margins': 0.0, 'logps/rejected': -407.6622314453125, 'logps/chosen': -442.01617431640625, 'logits/rejected': -1.5653201341629028, 'logits/chosen': -1.58231520652771, 'epoch': 0.07}
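The initial loss of 0.6931 is not noise: it is exactly ln 2, which is what the DPO objective yields before the first update, when the policy still equals the reference model and every log-ratio is zero (hence the zeroed rewards/* columns above). A sketch using the standard DPO formulation, plugging in the logps from the first line (beta = 0.1 is an assumption, not read from the log; the value of beta does not matter when the margin is zero):

    import math

    def dpo_loss(pi_chosen, pi_rejected, ref_chosen, ref_rejected, beta=0.1):
        r_chosen = beta * (pi_chosen - ref_chosen)        # implicit reward, chosen
        r_rejected = beta * (pi_rejected - ref_rejected)  # implicit reward, rejected
        margin = r_chosen - r_rejected
        return -math.log(1.0 / (1.0 + math.exp(-margin)))  # -log sigmoid(margin)

    # at step 0 the policy equals the reference, so every log-ratio is 0:
    print(dpo_loss(-522.23, -430.02, -522.23, -430.02))  # 0.6931... = ln 2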
+ {'loss': 0.6925, 'grad_norm': 21.124662560316178, 'learning_rate': 5e-07, 'rewards/chosen': -0.0025113581214100122, 'rewards/rejected': -0.000655155279673636, 'rewards/accuracies': 0.3125, 'rewards/margins': -0.0018562028417363763, 'logps/rejected': -416.7310485839844, 'logps/chosen': -491.6346740722656, 'logits/rejected': -1.5683329105377197, 'logits/chosen': -1.5681908130645752, 'epoch': 0.11}
+ {'loss': 0.6914, 'grad_norm': 21.322323109359544, 'learning_rate': 4.978612153434526e-07, 'rewards/chosen': 0.000672187830787152, 'rewards/rejected': -0.0027900219429284334, 'rewards/accuracies': 0.6875, 'rewards/margins': 0.0034622098319232464, 'logps/rejected': -379.40631103515625, 'logps/chosen': -509.2655029296875, 'logits/rejected': -1.6126008033752441, 'logits/chosen': -1.537656307220459, 'epoch': 0.15}
+ {'loss': 0.6851, 'grad_norm': 20.49472730639404, 'learning_rate': 4.91481456572267e-07, 'rewards/chosen': 0.003443555673584342, 'rewards/rejected': -0.011739811860024929, 'rewards/accuracies': 0.875, 'rewards/margins': 0.01518336683511734, 'logps/rejected': -403.841796875, 'logps/chosen': -488.357421875, 'logits/rejected': -1.5996417999267578, 'logits/chosen': -1.5487549304962158, 'epoch': 0.19}
+ {'loss': 0.6694, 'grad_norm': 21.062141100447082, 'learning_rate': 4.809698831278217e-07, 'rewards/chosen': 0.005675068125128746, 'rewards/rejected': -0.03574930876493454, 'rewards/accuracies': 1.0, 'rewards/margins': 0.041424378752708435, 'logps/rejected': -404.27301025390625, 'logps/chosen': -471.77337646484375, 'logits/rejected': -1.543208360671997, 'logits/chosen': -1.4875125885009766, 'epoch': 0.22}
+ {'loss': 0.6585, 'grad_norm': 19.360093428533585, 'learning_rate': 4.6650635094610966e-07, 'rewards/chosen': 0.003629775019362569, 'rewards/rejected': -0.057777851819992065, 'rewards/accuracies': 1.0, 'rewards/margins': 0.061407629400491714, 'logps/rejected': -342.85699462890625, 'logps/chosen': -442.50823974609375, 'logits/rejected': -1.5954453945159912, 'logits/chosen': -1.5651326179504395, 'epoch': 0.26}
+ {'loss': 0.6262, 'grad_norm': 20.593014111238194, 'learning_rate': 4.483383350728088e-07, 'rewards/chosen': 0.012023219838738441, 'rewards/rejected': -0.1112871766090393, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.123310387134552, 'logps/rejected': -454.96221923828125, 'logps/chosen': -557.0953979492188, 'logits/rejected': -1.4964159727096558, 'logits/chosen': -1.4438291788101196, 'epoch': 0.3}
+ {'loss': 0.61, 'grad_norm': 18.97598534624882, 'learning_rate': 4.2677669529663686e-07, 'rewards/chosen': 0.005186537280678749, 'rewards/rejected': -0.20743262767791748, 'rewards/accuracies': 1.0, 'rewards/margins': 0.21261915564537048, 'logps/rejected': -464.10333251953125, 'logps/chosen': -545.1387939453125, 'logits/rejected': -1.501185417175293, 'logits/chosen': -1.4766960144042969, 'epoch': 0.33}
+ {'loss': 0.5895, 'grad_norm': 18.27743651838518, 'learning_rate': 4.0219035725218013e-07, 'rewards/chosen': 0.0059821512550115585, 'rewards/rejected': -0.2528378963470459, 'rewards/accuracies': 1.0, 'rewards/margins': 0.2588200271129608, 'logps/rejected': -449.51141357421875, 'logps/chosen': -547.5632934570312, 'logits/rejected': -1.575829029083252, 'logits/chosen': -1.577054500579834, 'epoch': 0.37}
+ {'loss': 0.5797, 'grad_norm': 17.63288173591455, 'learning_rate': 3.75e-07, 'rewards/chosen': -0.056207820773124695, 'rewards/rejected': -0.24953657388687134, 'rewards/accuracies': 1.0, 'rewards/margins': 0.19332876801490784, 'logps/rejected': -381.8707275390625, 'logps/chosen': -413.1561279296875, 'logits/rejected': -1.5630258321762085, 'logits/chosen': -1.528336524963379, 'epoch': 0.41}
+ {'loss': 0.5321, 'grad_norm': 19.011135172725975, 'learning_rate': 3.4567085809127245e-07, 'rewards/chosen': -0.09093473851680756, 'rewards/rejected': -0.410855233669281, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.31992048025131226, 'logps/rejected': -390.5659484863281, 'logps/chosen': -429.57568359375, 'logits/rejected': -1.5054963827133179, 'logits/chosen': -1.518822431564331, 'epoch': 0.44}
+ {'loss': 0.4824, 'grad_norm': 19.115556004222547, 'learning_rate': 3.147047612756302e-07, 'rewards/chosen': -0.12970858812332153, 'rewards/rejected': -0.6150213479995728, 'rewards/accuracies': 1.0, 'rewards/margins': 0.4853127896785736, 'logps/rejected': -425.39727783203125, 'logps/chosen': -497.06396484375, 'logits/rejected': -1.4986257553100586, 'logits/chosen': -1.4965465068817139, 'epoch': 0.48}
+ {'loss': 0.4697, 'grad_norm': 19.291540876536054, 'learning_rate': 2.826315480550129e-07, 'rewards/chosen': -0.246780663728714, 'rewards/rejected': -0.7276896238327026, 'rewards/accuracies': 1.0, 'rewards/margins': 0.48090896010398865, 'logps/rejected': -438.9479675292969, 'logps/chosen': -486.5482482910156, 'logits/rejected': -1.4534223079681396, 'logits/chosen': -1.4486994743347168, 'epoch': 0.52}
+ {'loss': 0.4387, 'grad_norm': 18.729411908069906, 'learning_rate': 2.5e-07, 'rewards/chosen': -0.365586519241333, 'rewards/rejected': -0.9307430982589722, 'rewards/accuracies': 1.0, 'rewards/margins': 0.5651566386222839, 'logps/rejected': -494.84197998046875, 'logps/chosen': -521.0465698242188, 'logits/rejected': -1.5101096630096436, 'logits/chosen': -1.5300798416137695, 'epoch': 0.56}
+ {'loss': 0.4176, 'grad_norm': 18.803005444933415, 'learning_rate': 2.1736845194498716e-07, 'rewards/chosen': -0.517939567565918, 'rewards/rejected': -1.0383610725402832, 'rewards/accuracies': 0.875, 'rewards/margins': 0.5204216241836548, 'logps/rejected': -369.7915344238281, 'logps/chosen': -459.795654296875, 'logits/rejected': -1.48337721824646, 'logits/chosen': -1.4446580410003662, 'epoch': 0.59}
+ {'loss': 0.403, 'grad_norm': 16.74794343891432, 'learning_rate': 1.8529523872436977e-07, 'rewards/chosen': -0.6889065504074097, 'rewards/rejected': -1.3369628190994263, 'rewards/accuracies': 1.0, 'rewards/margins': 0.6480563282966614, 'logps/rejected': -611.8056640625, 'logps/chosen': -628.4656372070312, 'logits/rejected': -1.4669878482818604, 'logits/chosen': -1.4267468452453613, 'epoch': 0.63}
+ {'loss': 0.4156, 'grad_norm': 16.666326698051094, 'learning_rate': 1.5432914190872756e-07, 'rewards/chosen': -0.7545160055160522, 'rewards/rejected': -1.355210304260254, 'rewards/accuracies': 0.875, 'rewards/margins': 0.6006942391395569, 'logps/rejected': -625.54248046875, 'logps/chosen': -662.2476806640625, 'logits/rejected': -1.4642560482025146, 'logits/chosen': -1.4678699970245361, 'epoch': 0.67}
+ {'loss': 0.3557, 'grad_norm': 20.816617967599026, 'learning_rate': 1.2500000000000005e-07, 'rewards/chosen': -0.7387192249298096, 'rewards/rejected': -1.7132151126861572, 'rewards/accuracies': 1.0, 'rewards/margins': 0.9744957685470581, 'logps/rejected': -571.2609252929688, 'logps/chosen': -588.0455322265625, 'logits/rejected': -1.5267467498779297, 'logits/chosen': -1.5228757858276367, 'epoch': 0.7}
+ {'loss': 0.3822, 'grad_norm': 15.44110172622846, 'learning_rate': 9.780964274781983e-08, 'rewards/chosen': -0.7607905268669128, 'rewards/rejected': -1.6602680683135986, 'rewards/accuracies': 1.0, 'rewards/margins': 0.899477481842041, 'logps/rejected': -557.4992065429688, 'logps/chosen': -621.6253051757812, 'logits/rejected': -1.3522812128067017, 'logits/chosen': -1.3689507246017456, 'epoch': 0.74}
+ {'loss': 0.3529, 'grad_norm': 15.868660349315958, 'learning_rate': 7.322330470336313e-08, 'rewards/chosen': -0.9673899412155151, 'rewards/rejected': -1.7681092023849487, 'rewards/accuracies': 1.0, 'rewards/margins': 0.8007193207740784, 'logps/rejected': -677.4046630859375, 'logps/chosen': -700.6104125976562, 'logits/rejected': -1.4624991416931152, 'logits/chosen': -1.4828577041625977, 'epoch': 0.78}
+ {'loss': 0.3545, 'grad_norm': 15.694101584565033, 'learning_rate': 5.166166492719124e-08, 'rewards/chosen': -0.9602385759353638, 'rewards/rejected': -1.9203271865844727, 'rewards/accuracies': 1.0, 'rewards/margins': 0.9600885510444641, 'logps/rejected': -537.06787109375, 'logps/chosen': -577.3356323242188, 'logits/rejected': -1.4121013879776, 'logits/chosen': -1.4382765293121338, 'epoch': 0.81}
+ {'loss': 0.3302, 'grad_norm': 14.540270530925572, 'learning_rate': 3.349364905389032e-08, 'rewards/chosen': -0.7483774423599243, 'rewards/rejected': -2.161633253097534, 'rewards/accuracies': 1.0, 'rewards/margins': 1.4132558107376099, 'logps/rejected': -686.2198486328125, 'logps/chosen': -638.3490600585938, 'logits/rejected': -1.3795686960220337, 'logits/chosen': -1.386987566947937, 'epoch': 0.85}
+ {'loss': 0.3296, 'grad_norm': 17.790158271068087, 'learning_rate': 1.9030116872178314e-08, 'rewards/chosen': -0.8251708149909973, 'rewards/rejected': -1.8419311046600342, 'rewards/accuracies': 1.0, 'rewards/margins': 1.0167603492736816, 'logps/rejected': -591.7145385742188, 'logps/chosen': -697.348388671875, 'logits/rejected': -1.4148858785629272, 'logits/chosen': -1.3577136993408203, 'epoch': 0.89}
+ {'loss': 0.3282, 'grad_norm': 15.54643458961614, 'learning_rate': 8.518543427732949e-09, 'rewards/chosen': -0.9578792452812195, 'rewards/rejected': -1.9465538263320923, 'rewards/accuracies': 0.9375, 'rewards/margins': 0.988674521446228, 'logps/rejected': -653.150146484375, 'logps/chosen': -599.9559326171875, 'logits/rejected': -1.4120821952819824, 'logits/chosen': -1.4623993635177612, 'epoch': 0.93}
+ {'loss': 0.3016, 'grad_norm': 16.648883686759966, 'learning_rate': 2.1387846565474044e-09, 'rewards/chosen': -1.085956335067749, 'rewards/rejected': -2.2164981365203857, 'rewards/accuracies': 0.9375, 'rewards/margins': 1.1305416822433472, 'logps/rejected': -615.1410522460938, 'logps/chosen': -623.4697265625, 'logits/rejected': -1.327600359916687, 'logits/chosen': -1.3788025379180908, 'epoch': 0.96}
+ {'loss': 0.3356, 'grad_norm': 13.437946112135876, 'learning_rate': 0.0, 'rewards/chosen': -0.9807089567184448, 'rewards/rejected': -2.008396625518799, 'rewards/accuracies': 0.9375, 'rewards/margins': 1.0276877880096436, 'logps/rejected': -537.026611328125, 'logps/chosen': -570.8668212890625, 'logits/rejected': -1.4338197708129883, 'logits/chosen': -1.4525063037872314, 'epoch': 1.0}
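Reading the reward columns across these 27 logged steps (to the best of my understanding of TRL's DPOTrainer logging conventions, not confirmed by this log): rewards/chosen and rewards/rejected are the beta-scaled policy-vs-reference log-ratios, rewards/margins is their difference, and rewards/accuracies is the fraction of pairs in the batch with a positive margin. The final line is internally consistent under that reading:

    chosen, rejected = -0.9807, -2.0084  # rounded from the last log line above
    margin = chosen - rejected
    print(margin)  # ~1.0277, matching the logged 'rewards/margins'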
+ [2024-07-15 08:20:51,092] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step27 is about to be saved!
+ [2024-07-15 08:20:51,121] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: DPO/CHT-6_20240715-1255/checkpoint-27/global_step27/zero_pp_rank_0_mp_rank_00_model_states.pt
+ [2024-07-15 08:20:51,122] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving DPO/CHT-6_20240715-1255/checkpoint-27/global_step27/zero_pp_rank_0_mp_rank_00_model_states.pt...
+ [2024-07-15 08:20:51,199] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved DPO/CHT-6_20240715-1255/checkpoint-27/global_step27/zero_pp_rank_0_mp_rank_00_model_states.pt.
+ [2024-07-15 08:20:51,269] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving DPO/CHT-6_20240715-1255/checkpoint-27/global_step27/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt...
+ [2024-07-15 08:26:24,053] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved DPO/CHT-6_20240715-1255/checkpoint-27/global_step27/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt.
+ [2024-07-15 08:26:24,128] [INFO] [engine.py:3393:_save_zero_checkpoint] zero checkpoint saved DPO/CHT-6_20240715-1255/checkpoint-27/global_step27/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
+ [2024-07-15 08:26:31,282] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step27 is ready now!
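Because this run uses ZeRO stage 3, the checkpoint-27/global_step27 directory saved above holds per-rank shards (zero_pp_rank_* model and optimizer states), not a single consolidated state dict. DeepSpeed provides a helper for exactly this layout; a hedged sketch of recovering full fp32 weights from the path in the log (the param count is an approximation for a Mistral-7B-sized model):

    from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

    ckpt_dir = "DPO/CHT-6_20240715-1255/checkpoint-27"
    # consolidates the global_step27/ shards into one CPU fp32 state dict
    state_dict = get_fp32_state_dict_from_zero_checkpoint(ckpt_dir)
    print(sum(p.numel() for p in state_dict.values()))  # roughly 7.2e9 parameters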
+ [INFO 20240712-1747] ☆☆☆☆☆
+ [INFO 20240712-1747] ☆☆☆☆☆
+ [INFO 20240712-1747] ☆☆☆☆☆
+ {'train_runtime': 833.3436, 'train_samples_per_second': 2.074, 'train_steps_per_second': 0.032, 'train_loss': 0.4969749693517332, 'epoch': 1.0}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:388a5a6eb27ed5c1aaa188949cbae9dec3650cc0e7ed58437b74d8ba5c0b0f77
+ size 7352
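The three added lines above are a Git LFS pointer, not the file itself: the repo stores only the sha256 and the size (7352 bytes), and the blob is fetched on demand. Once pulled, training_args.bin is the pickled TrainingArguments object that the HF Trainer saves alongside checkpoints; a hedged sketch of inspecting it (weights_only=False is needed on recent PyTorch versions because this is a pickled object rather than a tensor file):

    import torch

    args = torch.load("training_args.bin", weights_only=False)
    print(args.per_device_train_batch_size, args.gradient_accumulation_steps)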