MatsRooth committed
Commit d129b8e
1 Parent(s): 08b5440

Upload tune.py with huggingface_hub

Files changed (1)
  1. tune.py +312 -0
tune.py ADDED
@@ -0,0 +1,312 @@
+ # Follow https://blog.ovhcloud.com/fine-tuning-llama-2-models-using-a-single-gpu-qlora-and-ai-notebooks/
+
+ # Connect to a compute node interactively
+ # srun --partition=gpu-interactive --gpus=a5000:1 --mem=16000 --pty /bin/bash
+ # source env/hugh/bin/activate
+ # cd /share/compling/speech/llama_tuning
+
+ # On first execution:
+ # Downloading and preparing dataset json/databricks--databricks-dolly-15k ...
+
+ import argparse
+ import bitsandbytes as bnb
+ from datasets import load_dataset
+ from functools import partial
+ import os
+ from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training, AutoPeftModelForCausalLM
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed, Trainer, TrainingArguments, BitsAndBytesConfig, \
+     DataCollatorForLanguageModeling
+
+ def load_model(model_name, bnb_config):
+     n_gpus = torch.cuda.device_count()
+     max_memory = f'{40960}MB'
+
+     model = AutoModelForCausalLM.from_pretrained(
+         model_name,
+         quantization_config=bnb_config,
+         device_map="auto",  # dispatch the model efficiently on the available resources
+         max_memory={i: max_memory for i in range(n_gpus)},
+     )
+     tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=True)
+
+     # Needed for LLaMA tokenizer
+     tokenizer.pad_token = tokenizer.eos_token
+
+     return model, tokenizer
+
+ # Load the databricks dataset from Hugging Face
+ dataset = load_dataset("databricks/databricks-dolly-15k", split="train")
+
+ print(f'Number of prompts: {len(dataset)}')
+ print(f'Column names are: {dataset.column_names}')
+
+ # Output
+ # Number of prompts: 15011
+ # Column names are: ['instruction', 'context', 'response', 'category']
+
+ ## Pre-processing the dataset
+ def create_prompt_formats(sample):
+     """
+     Format the fields of the sample ('instruction', 'context', 'response'),
+     then concatenate them using two newline characters
+     :param sample: Sample dictionary
+     """
+
+     INTRO_BLURB = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
+     INSTRUCTION_KEY = "### Instruction:"
+     INPUT_KEY = "Input:"
+     RESPONSE_KEY = "### Response:"
+     END_KEY = "### End"
+
+     blurb = f"{INTRO_BLURB}"
+     instruction = f"{INSTRUCTION_KEY}\n{sample['instruction']}"
+     input_context = f"{INPUT_KEY}\n{sample['context']}" if sample["context"] else None
+     response = f"{RESPONSE_KEY}\n{sample['response']}"
+     end = f"{END_KEY}"
+
+     parts = [part for part in [blurb, instruction, input_context, response, end] if part]
+
+     formatted_prompt = "\n\n".join(parts)
+
+     sample["text"] = formatted_prompt
+
+     return sample
+
+
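+ # For example, create_prompt_formats renders a sample with a non-empty 'context'
+ # into a "text" field of the form (angle brackets mark the sample's own values):
+ #
+ #   Below is an instruction that describes a task. Write a response that appropriately completes the request.
+ #
+ #   ### Instruction:
+ #   <instruction>
+ #
+ #   Input:
+ #   <context>
+ #
+ #   ### Response:
+ #   <response>
+ #
+ #   ### End
+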
+ # SOURCE https://github.com/databrickslabs/dolly/blob/master/training/trainer.py
+ def get_max_length(model):
+     conf = model.config
+     max_length = None
+     for length_setting in ["n_positions", "max_position_embeddings", "seq_length"]:
+         max_length = getattr(model.config, length_setting, None)
+         if max_length:
+             print(f"Found max length: {max_length}")
+             break
+     if not max_length:
+         max_length = 1024
+         print(f"Using default max length: {max_length}")
+     return max_length
+
+
+ def preprocess_batch(batch, tokenizer, max_length):
+     """
+     Tokenizing a batch
+     """
+     return tokenizer(
+         batch["text"],
+         max_length=max_length,
+         truncation=True,
+     )
+
+
+ # SOURCE https://github.com/databrickslabs/dolly/blob/master/training/trainer.py
+ def preprocess_dataset(tokenizer: AutoTokenizer, max_length: int, seed, dataset):
+     """Format & tokenize the dataset so it is ready for training
+     :param tokenizer (AutoTokenizer): Model tokenizer
+     :param max_length (int): Maximum number of tokens to emit from the tokenizer
+     :param seed (int): Seed used to shuffle the dataset
+     :param dataset: Dataset to preprocess
+     """
+
+     # Add prompt to each sample
+     print("Preprocessing dataset...")
+     dataset = dataset.map(create_prompt_formats)  # , batched=True)
+
+     # Apply preprocessing to each batch of the dataset & remove the 'instruction', 'context', 'response', 'category' fields
+     _preprocessing_function = partial(preprocess_batch, max_length=max_length, tokenizer=tokenizer)
+     dataset = dataset.map(
+         _preprocessing_function,
+         batched=True,
+         remove_columns=["instruction", "context", "response", "text", "category"],
+     )
+
+     # Filter out samples whose input_ids exceed max_length
+     dataset = dataset.filter(lambda sample: len(sample["input_ids"]) < max_length)
+
+     # Shuffle dataset
+     dataset = dataset.shuffle(seed=seed)
+
+     return dataset
+
+ ## Create a bitsandbytes configuration
+ def create_bnb_config():
+     bnb_config = BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_use_double_quant=True,
+         bnb_4bit_quant_type="nf4",
+         bnb_4bit_compute_dtype=torch.bfloat16,
+     )
+
+     return bnb_config
+
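+ # Note on create_bnb_config: weights are loaded as 4-bit NF4 with double quantization
+ # (the quantization constants are themselves quantized), while matrix multiplications
+ # are carried out in bfloat16 via bnb_4bit_compute_dtype.
+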
+ def create_peft_config(modules):
+     """
+     Create Parameter-Efficient Fine-Tuning config for your model
+     :param modules: Names of the modules to apply Lora to
+     """
+     config = LoraConfig(
+         r=16,  # dimension of the updated matrices
+         lora_alpha=64,  # parameter for scaling
+         target_modules=modules,
+         lora_dropout=0.1,  # dropout probability for layers
+         bias="none",
+         task_type="CAUSAL_LM",
+     )
+
+     return config
+
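+ # Note on create_peft_config: with r=16 and lora_alpha=64 the LoRA update is scaled by
+ # lora_alpha / r = 4; only the injected low-rank adapter matrices are trained.
+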
+ # SOURCE https://github.com/artidoro/qlora/blob/main/qlora.py
+
+ def find_all_linear_names(model):
+     cls = bnb.nn.Linear4bit  # if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear)
+     lora_module_names = set()
+     for name, module in model.named_modules():
+         if isinstance(module, cls):
+             names = name.split('.')
+             lora_module_names.add(names[0] if len(names) == 1 else names[-1])
+
+     if 'lm_head' in lora_module_names:  # needed for 16-bit
+         lora_module_names.remove('lm_head')
+     return list(lora_module_names)
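+ # For Llama-2 loaded in 4-bit, every nn.Linear except lm_head becomes a bnb.nn.Linear4bit,
+ # so this typically returns the attention and MLP projections:
+ # ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'].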
+
+ def print_trainable_parameters(model, use_4bit=False):
+     """
+     Prints the number of trainable parameters in the model.
+     """
+     trainable_params = 0
+     all_param = 0
+     for _, param in model.named_parameters():
+         num_params = param.numel()
+         # if using DS Zero 3 and the weights are initialized empty
+         if num_params == 0 and hasattr(param, "ds_numel"):
+             num_params = param.ds_numel
+
+         all_param += num_params
+         if param.requires_grad:
+             trainable_params += num_params
+     if use_4bit:
+         trainable_params //= 2
+     print(
+         f"all params: {all_param:,d} || trainable params: {trainable_params:,d} || trainable%: {100 * trainable_params / all_param}"
+     )
+
195
+ # Load model from HF with user's token and with bitsandbytes config
196
+
197
+ model_name = "meta-llama/Llama-2-7b-hf"
198
+
199
+ bnb_config = create_bnb_config()
200
+
201
+ model, tokenizer = load_model(model_name, bnb_config)
202
+
203
+ print(model)
204
+
205
+ ## Preprocess dataset
206
+
207
+ max_length = get_max_length(model)
208
+
209
+ print(max_length)
210
+
211
+ # The seed seems to be missing in https://blog.ovhcloud.com/fine-tuning-llama-2-models-using-a-single-gpu-qlora-and-ai-notebooks/
212
+ # It is supposed to be an int, make one up.
213
+ seed = 98345
214
+
215
+ dataset = preprocess_dataset(tokenizer, max_length, seed, dataset)
216
+
217
+
+ def train(model, tokenizer, dataset, output_dir):
+     # Apply preprocessing to the model to prepare it by
+     # 1 - Enabling gradient checkpointing to reduce memory usage during fine-tuning
+     model.gradient_checkpointing_enable()
+
+     # 2 - Using the prepare_model_for_kbit_training method from PEFT
+     model = prepare_model_for_kbit_training(model)
+
+     # Get lora module names
+     modules = find_all_linear_names(model)
+
+     # Create PEFT config for these modules and wrap the model to PEFT
+     peft_config = create_peft_config(modules)
+     model = get_peft_model(model, peft_config)
+
+     # Print information about the percentage of trainable parameters
+     print_trainable_parameters(model)
+
+     # Training parameters
+     trainer = Trainer(
+         model=model,
+         train_dataset=dataset,
+         args=TrainingArguments(
+             per_device_train_batch_size=1,
+             gradient_accumulation_steps=4,
+             warmup_steps=2,
+             max_steps=20,
+             learning_rate=2e-4,
+             fp16=True,
+             logging_steps=1,
+             output_dir="outputs",
+             optim="paged_adamw_8bit",
+         ),
+         data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False)
+     )
+
+     model.config.use_cache = False  # re-enable for inference to speed up predictions for similar inputs
+
+     ### SOURCE https://github.com/artidoro/qlora/blob/main/qlora.py
+     # Verifying the datatypes before training
+
+     dtypes = {}
+     for _, p in model.named_parameters():
+         dtype = p.dtype
+         if dtype not in dtypes: dtypes[dtype] = 0
+         dtypes[dtype] += p.numel()
+     total = 0
+     for k, v in dtypes.items(): total += v
+     for k, v in dtypes.items():
+         print(k, v, v/total)
+
+     do_train = True
+
+     # Launch training
+     print("Training...")
+
+     if do_train:
+         train_result = trainer.train()
+         metrics = train_result.metrics
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+         print(metrics)
+
+     ###
+
+     # Saving model
+     print("Saving last checkpoint of the model...")
+     os.makedirs(output_dir, exist_ok=True)
+     trainer.model.save_pretrained(output_dir)
+
+     # Free memory for merging weights
+     del model
+     del trainer
+     torch.cuda.empty_cache()
+
+ output_dir = "results/llama2/final_checkpoint"
+
+ # Run train!
+ print("Run train ...")
+ train(model, tokenizer, dataset, output_dir)
+
+
+ # Merge weights and save the merged checkpoint
+ model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16)
+ model = model.merge_and_unload()
+
+ output_merged_dir = "results/llama2/final_merged_checkpoint"
+ os.makedirs(output_merged_dir, exist_ok=True)
+ model.save_pretrained(output_merged_dir, safe_serialization=True)
+
+ # save tokenizer for easy inference
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ tokenizer.save_pretrained(output_merged_dir)
+
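
For reference, a minimal inference sketch against the merged checkpoint (not part of tune.py; the prompt text and generation settings below are illustrative assumptions):

    from transformers import AutoModelForCausalLM, AutoTokenizer
    import torch

    merged_dir = "results/llama2/final_merged_checkpoint"
    model = AutoModelForCausalLM.from_pretrained(merged_dir, device_map="auto", torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(merged_dir)

    # Reuse the fine-tuning prompt format for a single instruction (example prompt made up)
    prompt = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\nName three uses of a llama.\n\n### Response:\n"
    )
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=100)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))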