Update sample_finetune.py
#42
by wwwaj - opened

sample_finetune.py  +118 -29  CHANGED
@@ -1,28 +1,68 @@
-import
+import sys
+import logging
+
+import datasets
 from datasets import load_dataset
+from peft import LoraConfig
+import torch
+import transformers
 from trl import SFTTrainer
-from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
 
 """
 A simple example on using SFTTrainer and Accelerate to finetune Phi-3 models. For
-a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py
+a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
+This example uses DeepSpeed ZeRO3 offload to reduce memory usage. The script can
+be run on V100 or later generation GPUs. Here are some suggestions for further
+reducing memory consumption:
+    - reduce the batch size
+    - decrease the LoRA dimension
+    - restrict the LoRA target modules
+Please follow these steps to run the script:
+1. Install dependencies:
     conda install -c conda-forge accelerate
+    pip3 install -i https://pypi.org/simple/ bitsandbytes
+    pip3 install peft
+    pip3 install deepspeed
+2. Set up the accelerate and deepspeed config based on the machine used:
     accelerate config
+Here is a sample config for DeepSpeed ZeRO3:
+    compute_environment: LOCAL_MACHINE
+    debug: false
+    deepspeed_config:
+        gradient_accumulation_steps: 1
+        offload_optimizer_device: none
+        offload_param_device: none
+        zero3_init_flag: true
+        zero3_save_16bit_model: true
+        zero_stage: 3
+    distributed_type: DEEPSPEED
+    downcast_bf16: 'no'
+    enable_cpu_affinity: false
+    machine_rank: 0
+    main_training_function: main
+    mixed_precision: bf16
+    num_machines: 1
+    num_processes: 4
+    rdzv_backend: static
+    same_network: true
+    tpu_env: []
+    tpu_use_cluster: false
+    tpu_use_sudo: false
+    use_cpu: false
+3. Check the accelerate config:
    accelerate env
+4. Run the code:
    accelerate launch sample_finetune.py
 """
 
+logger = logging.getLogger(__name__)
+
+
 ###################
 # Hyper-parameters
 ###################
+training_config = {
     "bf16": True,
     "do_eval": False,
     "learning_rate": 5.0e-06,
@@ -35,7 +75,7 @@ args = {
     "output_dir": "./checkpoint_dir",
     "overwrite_output_dir": True,
     "per_device_eval_batch_size": 4,
-    "per_device_train_batch_size":
+    "per_device_train_batch_size": 4,
     "remove_unused_columns": True,
     "save_steps": 100,
     "save_total_limit": 1,
@@ -45,8 +85,43 @@ args = {
     "gradient_accumulation_steps": 1,
     "warmup_ratio": 0.2,
 }
+
+peft_config = {
+    "r": 16,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "bias": "none",
+    "task_type": "CAUSAL_LM",
+    "target_modules": "all-linear",
+    "modules_to_save": None,
+}
+train_conf = TrainingArguments(**training_config)
+peft_conf = LoraConfig(**peft_config)
+
+
+###############
+# Setup logging
+###############
+logging.basicConfig(
+    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S",
+    handlers=[logging.StreamHandler(sys.stdout)],
+)
+log_level = train_conf.get_process_log_level()
+logger.setLevel(log_level)
+datasets.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.enable_default_handler()
+transformers.utils.logging.enable_explicit_format()
+
+# Log on each process a small summary
+logger.warning(
+    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
+    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
+)
+logger.info(f"Training/evaluation parameters {train_conf}")
+logger.info(f"PEFT parameters {peft_conf}")
+
 
 ################
 # Model Loading
@@ -58,14 +133,16 @@ model_kwargs = dict(
     trust_remote_code=True,
     attn_implementation="flash_attention_2",  # loading the model with flash-attention support
     torch_dtype=torch.bfloat16,
-    device_map=
+    device_map=None
 )
 model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
 tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
+tokenizer.model_max_length = 2048
 tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
 tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
 tokenizer.padding_side = 'right'
 
+
 ##################
 # Data Processing
 ##################
@@ -82,26 +159,36 @@ def apply_chat_template(
     return example
 
 raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k")
+train_dataset = raw_dataset["train_sft"]
+test_dataset = raw_dataset["test_sft"]
+column_names = list(train_dataset.features)
 
+processed_train_dataset = train_dataset.map(
     apply_chat_template,
     fn_kwargs={"tokenizer": tokenizer},
-    num_proc=
+    num_proc=10,
     remove_columns=column_names,
-    desc="Applying chat template",
+    desc="Applying chat template to train_sft",
 )
+
+processed_test_dataset = test_dataset.map(
+    apply_chat_template,
+    fn_kwargs={"tokenizer": tokenizer},
+    num_proc=10,
+    remove_columns=column_names,
+    desc="Applying chat template to test_sft",
+)
+
 
 ###########
 # Training
 ###########
 trainer = SFTTrainer(
     model=model,
-    args=
+    args=train_conf,
+    peft_config=peft_conf,
+    train_dataset=processed_train_dataset,
+    eval_dataset=processed_test_dataset,
     max_seq_length=2048,
     dataset_text_field="text",
     tokenizer=tokenizer,
@@ -113,16 +200,18 @@ trainer.log_metrics("train", metrics)
 trainer.save_metrics("train", metrics)
 trainer.save_state()
 
+
 #############
 # Evaluation
 #############
 tokenizer.padding_side = 'left'
 metrics = trainer.evaluate()
-metrics["eval_samples"] = len(
+metrics["eval_samples"] = len(processed_test_dataset)
 trainer.log_metrics("eval", metrics)
 trainer.save_metrics("eval", metrics)
 
-#
+
+# ############
+# # Save model
+# ############
+trainer.save_model(train_conf.output_dir)
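The docstring lists restricting the LoRA target modules as one way to cut memory, while the peft_config in the diff uses "target_modules": "all-linear". A lower-memory variant might look like the sketch below; peft_config_small is a hypothetical name, and the module names are an assumption that depends on the model architecture, so verify them against model.named_modules() for the checkpoint actually loaded.

    from peft import LoraConfig

    # Hypothetical lower-memory variant of the diff's peft_config:
    # smaller rank and only the attention projections as LoRA targets.
    peft_config_small = {
        "r": 8,                                    # lower rank than the 16 used above
        "lora_alpha": 16,
        "lora_dropout": 0.05,
        "bias": "none",
        "task_type": "CAUSAL_LM",
        "target_modules": ["qkv_proj", "o_proj"],  # assumed attention module names; check model.named_modules()
        "modules_to_save": None,
    }
    peft_conf_small = LoraConfig(**peft_config_small)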
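The new import line pulls in BitsAndBytesConfig, but none of the hunks shown here actually use it. If 4-bit quantized loading is intended as an additional memory saver, it would look roughly like the sketch below; the values are illustrative, not part of the PR, and quantized loading is typically used instead of (not together with) ZeRO3 parameter sharding.

    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    # Illustrative 4-bit (QLoRA-style) loading; the checkpoint path is an example,
    # the script defines its own checkpoint_path outside the shown hunks.
    checkpoint_path = "microsoft/Phi-3-mini-4k-instruct"
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint_path,
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        quantization_config=bnb_config,
    )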
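The Data Processing hunk maps apply_chat_template over the train_sft and test_sft splits of ultrachat_200k, but the function body lies outside the diff. For records that carry a "messages" list, such a helper typically looks like the sketch below; this is an assumption about the helper, not the PR's actual code.

    def apply_chat_template(example, tokenizer):
        # Assumed record shape: a list of {"role", "content"} messages per example.
        messages = example["messages"]
        # Render the conversation with the tokenizer's chat template into the
        # "text" field that SFTTrainer reads via dataset_text_field="text".
        example["text"] = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=False
        )
        return example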
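After training, trainer.save_model(train_conf.output_dir) writes the result to ./checkpoint_dir; with a peft_config attached, SFTTrainer normally saves a LoRA adapter rather than full model weights. A minimal sketch of reloading that adapter for inference, assuming an adapter was indeed saved there and using an example base checkpoint name:

    import torch
    from peft import PeftModel
    from transformers import AutoModelForCausalLM, AutoTokenizer

    checkpoint_path = "microsoft/Phi-3-mini-4k-instruct"  # example; use the same base checkpoint as in training
    base = AutoModelForCausalLM.from_pretrained(
        checkpoint_path, torch_dtype=torch.bfloat16, trust_remote_code=True
    )
    model = PeftModel.from_pretrained(base, "./checkpoint_dir")  # output_dir from the training config
    model = model.merge_and_unload()  # optionally fold the LoRA weights back into the base model
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)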