Upload lora_finetune_distributed.py with huggingface_hub
lora_finetune_distributed.py (new file: +615 -0)
@@ -0,0 +1,615 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import sys
import time

from functools import partial
from typing import Any, Dict, Optional, Tuple
from warnings import warn

import torch
from omegaconf import DictConfig

from torch import nn
from torch.distributed import destroy_process_group, init_process_group
from torch.distributed.fsdp import (
    FullOptimStateDictConfig,
    FullStateDictConfig,
    FullyShardedDataParallel as FSDP,
    StateDictType,
)
from torch.optim import Optimizer
from torch.utils.data import DataLoader, DistributedSampler
from torchtune import config, modules, utils
from torchtune.modules.peft.peft_utils import (
    get_adapter_params,
    get_merged_lora_ckpt,
    set_trainable_params,
    validate_state_dict_for_lora,
)
from torchtune.recipe_interfaces import FTRecipeInterface

from tqdm import tqdm

log = utils.get_logger("DEBUG")

class LoRAFinetuneRecipeDistributed(FTRecipeInterface):
    """
    Distributed LoRA finetuning recipe for dense transformer-based LLMs such as Llama2. This recipe supports
    distributed training and can be run on a single node (1 to 8 GPUs).

    Features:
        - FSDP. Supported using PyTorch's FSDP APIs. DDP is currently not supported. Training on CPU is not
            supported.

        - Activation Checkpointing. This can be controlled using the ``activation_checkpointing``
            flag. Activation checkpointing helps reduce the memory footprint since we no longer keep
            activations in memory and instead recompute them during the backward pass. This is especially
            helpful for larger batch sizes when you're memory constrained. But these savings in memory
            come at the cost of training performance. In most cases training can slow down quite a bit as
            a result of this activation recomputation.

        - Precision. Full fp32 and bf16 training are supported. Precision is controlled using the ``dtype``
            flag. When ``dtype=bf16``, all activations, gradients and optimizer states are in bfloat16. In
            most cases this should halve the memory footprint of full precision (fp32) training, without
            loss in model quality (this will depend on the model, training data and other settings). For
            GPUs which do not support bfloat16, we fall back to fp32. Mixed precision training and fp16
            precision are currently not supported.

        - Gradient Accumulation. You can simulate larger batch sizes by accumulating gradients. This is
            controlled using the ``gradient_accumulation_steps`` flag.

                Total Batch Size = batch_size * number of GPUs * gradient accumulation steps.

            For example: with batch_size=1, nproc_per_node=2 and gradient_accumulation_steps=32 we get a
            total batch size of 64.

            Gradient accumulation is especially useful when you are memory constrained. In this case,
            accumulating gradients might give you better training speed than enabling activation
            checkpointing.

        - Checkpointing. Model weights are checkpointed both at the end of each epoch and at the end of
            training. Currently we checkpoint both the adapter weights (trainable params only) and the
            complete merged weights (adapter weights added back to the base model). For more details
            please take a look at our LoRA tutorial
            (https://pytorch.org/torchtune/main/tutorials/lora_finetune.html).

            Optimizer state and recipe state (seed, total_epochs, number of epochs run, etc.) are
            only saved at the end of a given epoch and used in case of resuming training. Resuming
            training is controlled by the ``resume_from_checkpoint`` flag. Mid-epoch checkpointing is
            currently not supported.

            For more details on the checkpointer, please take a look at
            our checkpointer deepdive (https://pytorch.org/torchtune/main/tutorials/checkpointer.html).

        - Logging. Terminal, Disk, WandB and TensorBoard are all supported.

    For a full list of example configs for this recipe, run ``tune ls`` on the command line. Each config
    has example commands for how to kick off training.

    Args:
        cfg (DictConfig): OmegaConf object parsed from yaml file

    Raises:
        ValueError: If ``dtype`` is set to fp16.
        ValueError: If world_size is 1
        RuntimeError: If ``dtype`` is set to bf16 and the hardware does not support bf16.
    """

    def __init__(self, cfg: DictConfig) -> None:
        self._device = utils.get_device(device=cfg.device)
        self._dtype = utils.get_dtype(cfg.dtype, device=self._device)

        if self._dtype == torch.float16:
            raise ValueError(
                "full fp16 training is not supported with this recipe. Please use bf16 or fp32 instead."
            )

        _, rank = utils.get_world_size_and_rank()

        # _is_rank_zero is used primarily for logging. In the future, the logger
        # should directly take care of this
        self._is_rank_zero = rank == 0

        # logging attributes
        self._output_dir = cfg.output_dir
        self._log_every_n_steps = cfg.log_every_n_steps if cfg.log_every_n_steps else 1
        self._log_peak_memory_every_n_steps = 100

        # training attributes
        self._enable_activation_checkpointing = cfg.enable_activation_checkpointing

        # These attributes constitute the recipe state and are updated by ``load_checkpoint``
        # when ``resume_from_checkpoint`` is ``True``
        self.seed = utils.set_seed(seed=cfg.seed)
        self.epochs_run = 0
        self.total_epochs = cfg.epochs
        self.max_steps_per_epoch = cfg.max_steps_per_epoch
        self.total_training_steps = 0

        self._resume_from_checkpoint = cfg.resume_from_checkpoint
        self._gradient_accumulation_steps = cfg.gradient_accumulation_steps

    def load_checkpoint(self, cfg_checkpointer: DictConfig) -> Dict[str, Any]:
        """
        Extract the checkpoint state from file and validate. This includes the
        base model weights. If resume_from_checkpoint is True, this also includes
        the adapter weights and recipe state.
        """
        self._checkpointer = config.instantiate(
            cfg_checkpointer,
            resume_from_checkpoint=self._resume_from_checkpoint,
        )
        checkpoint_dict = self._checkpointer.load_checkpoint()

        # When resuming from checkpoint for LoRA, the recipe expects the adapter weights
        # and recipe state to be present. The keys should match up with what ``save_checkpoint``
        # used to create these intermediate checkpoints
        if self._resume_from_checkpoint:
            if utils.ADAPTER_KEY not in checkpoint_dict:
                raise ValueError(
                    "Adapter weights not found. Please ensure a valid adapter checkpoint is provided."
                )
            # _update_recipe_state will throw an exception if the recipe state is not correctly loaded,
            # so there is no need to check here
            self._update_recipe_state(checkpoint_dict)
        return checkpoint_dict

    def _update_recipe_state(self, ckpt_dict: Dict[str, Any]) -> None:
        """
        Updates the recipe state from checkpoint.
        """
        if not (
            utils.SEED_KEY in ckpt_dict
            and utils.TOTAL_EPOCHS_KEY in ckpt_dict
            and utils.MAX_STEPS_KEY in ckpt_dict
        ):
            raise KeyError(
                "Checkpoint does not contain the required keys needed for updating recipe state. "
                "Are you sure you passed in the right recipe checkpoint?"
            )
        # If seed, total_epochs or max_steps_per_epoch don't match,
        # warn the user and overwrite
        if (
            self.seed != ckpt_dict[utils.SEED_KEY]
            or self.total_epochs != ckpt_dict[utils.TOTAL_EPOCHS_KEY]
            or self.max_steps_per_epoch != ckpt_dict[utils.MAX_STEPS_KEY]
        ):
            warn(
                message="""Configured value for seed, epochs or max_steps_per_epoch
                does not match the value stored in checkpoint."""
            )
        self.seed = utils.set_seed(seed=ckpt_dict[utils.SEED_KEY])
        self.epochs_run = ckpt_dict[utils.EPOCHS_KEY]
        self.total_epochs = ckpt_dict[utils.TOTAL_EPOCHS_KEY]
        self.max_steps_per_epoch = ckpt_dict[utils.MAX_STEPS_KEY]

    def setup(self, cfg: DictConfig) -> None:
        """
        Set up the recipe. This includes recipe state (if resume_from_checkpoint is True),
        model, tokenizer, loss, optimizer, learning rate scheduler, sampler, and dataloader.
        """
        if self._is_rank_zero:
            self._metric_logger = config.instantiate(cfg.metric_logger)

            # log config with parameter override
            self._metric_logger.log_config(cfg)

        checkpoint_dict = self.load_checkpoint(cfg_checkpointer=cfg.checkpointer)

        self._model = self._setup_model(
            cfg_model=cfg.model,
            enable_activation_checkpointing=cfg.enable_activation_checkpointing,
            base_model_state_dict=checkpoint_dict[utils.MODEL_KEY],
            lora_weights_state_dict=(
                checkpoint_dict[utils.ADAPTER_KEY]
                if self._resume_from_checkpoint
                else None
            ),
        )
        self._tokenizer = config.instantiate(cfg.tokenizer)

        self._optimizer = self._setup_optimizer(
            cfg_optimizer=cfg.optimizer,
            opt_state_dict=checkpoint_dict[utils.OPT_KEY]
            if self._resume_from_checkpoint
            else None,
        )

        self._loss_fn = config.instantiate(cfg.loss)

        # sampler and dataloader depend on the tokenizer and loss_fn and should be
        # set up after both of these are initialized
        self._sampler, self._dataloader = self._setup_data(
            cfg_dataset=cfg.dataset,
            shuffle=cfg.shuffle,
            batch_size=cfg.batch_size,
        )

        # Finally update the recipe state, which can only be correctly set after all of the
        # other components have been initialized and updated.

        # Number of training steps in each epoch depends on the number of batches produced
        # by the dataloader and the max_steps_per_epoch param set by the user, and is used
        # for logging and tracking training state. This should be computed after the dataloader
        # has been set up
        self._steps_per_epoch = (
            len(self._dataloader) // self._gradient_accumulation_steps
        )
        if (
            self.max_steps_per_epoch is not None
            and self.max_steps_per_epoch < self._steps_per_epoch
        ):
            self._steps_per_epoch = self.max_steps_per_epoch
        self.total_training_steps = self.epochs_run * self._steps_per_epoch

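        # Illustrative example of the step math above (numbers are hypothetical,
        # not from the original source): with 1024 batches per rank per epoch and
        # gradient_accumulation_steps=8, steps_per_epoch is 1024 // 8 = 128.
        # Resuming after 2 completed epochs gives total_training_steps = 256,
        # which is passed as ``last_epoch`` below so the LR schedule picks up
        # exactly where it left off.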
        # Learning rate scheduler can only be set up after the number of steps
        # has been computed
        self._lr_scheduler = self._setup_lr_scheduler(
            cfg_lr_scheduler=cfg.lr_scheduler,
            num_training_steps=self.total_epochs * self._steps_per_epoch,
            last_epoch=self.total_training_steps - 1,
        )

    def _setup_model(
        self,
        cfg_model: DictConfig,
        enable_activation_checkpointing: bool,
        base_model_state_dict: Dict[str, Any],
        lora_weights_state_dict: Optional[Dict[str, Any]] = None,
    ) -> nn.Module:
        """
        Model initialization has some important considerations:
            a. To minimize GPU peak memory, we load the model on CPU with the right
               dtype. To ensure that we don't instantiate ``world_size`` number of models,
               we initialize on meta_device for all ranks other than rank 0.
            b. Rank 0 is also responsible for calling ``load_state_dict`` and loading the
               model weights from checkpoint.
            c. While wrapping the model with FSDP, we set ``sync_module_states``
               to ``True`` and broadcast module params and buffers from rank 0.
            d. The ``device_id`` param ensures that the FSDP initialization happens on
               the correct device.
        """

        if self._is_rank_zero:
            log.info("FSDP is enabled. Instantiating Model on CPU for Rank 0 ...")
            init_start = time.perf_counter()

            with utils.set_default_dtype(self._dtype):
                model = config.instantiate(cfg_model)

            log.info(
                f"Model instantiation took {time.perf_counter() - init_start:.2f} secs"
            )

            # The model contains LoRA params which won't have any matching keys in
            # the state dict. As a result, we need to load with strict=False.
            # Before loading the state dict, ensure the state dict keys for the base
            # model and adapters (if available) match the keys in the full LoRA model.
            # This is a good sanity check to prevent silent errors
            validate_state_dict_for_lora(
                lora_attn_modules=cfg_model.lora_attn_modules,
                apply_lora_to_mlp=cfg_model.apply_lora_to_mlp,
                apply_lora_to_output=cfg_model.apply_lora_to_output,
                full_model_state_dict_keys=model.state_dict().keys(),
                lora_state_dict_keys=(
                    lora_weights_state_dict.keys()
                    if lora_weights_state_dict is not None
                    else None
                ),
                base_model_state_dict_keys=base_model_state_dict.keys(),
            )

            # Load both the base model weights and (if available) the adapter weights.
            # Both of these should happen only on Rank 0
            model.load_state_dict(base_model_state_dict, strict=False)
            if lora_weights_state_dict:
                model.load_state_dict(lora_weights_state_dict, strict=False)

        else:
            # For non-zero ranks, load the model on meta device
            with utils.set_default_dtype(self._dtype), torch.device("meta"):
                model = config.instantiate(cfg_model)

            if self._dtype == torch.bfloat16:
                model = model.to(torch.bfloat16)

        # LoRA hyper-params needed for merging weights while saving checkpoints
        self._lora_rank = cfg_model.lora_rank
        self._lora_alpha = cfg_model.lora_alpha

        # Note: this needs to be set before wrapping with FSDP
        self.adapter_params = get_adapter_params(model)
        set_trainable_params(model, self.adapter_params)

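        # Sharding sketch (explanatory, not load-bearing): each
        # TransformerDecoderLayer below becomes its own FSDP unit, so full
        # parameters are all-gathered one layer at a time during forward and
        # backward, and peak memory scales with the largest wrapped unit rather
        # than the whole model. A LoRA-aware wrap policy is used here because
        # FSDP cannot mix requires_grad=True and requires_grad=False within a
        # single flat parameter, so trainable adapter params must land in
        # different flat groups than the frozen base params.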
        model = FSDP(
            module=model,
            auto_wrap_policy=utils.lora_fsdp_wrap_policy(
                modules_to_wrap={modules.TransformerDecoderLayer}
            ),
            sharding_strategy=torch.distributed.fsdp.ShardingStrategy.FULL_SHARD,
            device_id=self._device,
            # this recipe does not currently support mixed precision training
            mixed_precision=None,
            # Ensure we broadcast params and buffers from rank 0
            sync_module_states=True,
            # Initialize empty modules on all non-zero ranks
            param_init_fn=(
                lambda module: module.to_empty(
                    device=torch.device("cuda"), recurse=False
                )
                if not self._is_rank_zero
                else None
            ),
        )

        # Ensure no params and buffers are on meta device
        utils.validate_no_params_on_meta_device(model)

        if enable_activation_checkpointing:
            utils.set_activation_checkpointing(
                model, auto_wrap_policy={modules.TransformerDecoderLayer}
            )
        if self._is_rank_zero:
            memory_stats = utils.memory_stats_log(device=self._device)
            log.info(f"Memory Stats after model init:\n{memory_stats}")

        # synchronize before training begins
        torch.distributed.barrier()

        return model

    def _setup_optimizer(
        self, cfg_optimizer: DictConfig, opt_state_dict: Optional[Dict[str, Any]] = None
    ) -> Optimizer:
        optimizer = config.instantiate(cfg_optimizer, self._model.parameters())
        if opt_state_dict:
            # Note: technically we should check _contains_fsdp for
            # just the state dict of the adapter cfg, but should be equivalent
            opt_state_dict = utils.transform_opt_state_dict(
                opt_state_dict, self._model, optimizer
            )
            optimizer.load_state_dict(opt_state_dict)

        if self._is_rank_zero:
            log.info("Optimizer and loss are initialized.")
        return optimizer

    def _setup_lr_scheduler(
        self,
        cfg_lr_scheduler: DictConfig,
        num_training_steps: int,
        last_epoch: int,
    ) -> torch.optim.lr_scheduler.LRScheduler:
        lr_scheduler = config.instantiate(
            cfg_lr_scheduler,
            self._optimizer,
            num_training_steps=num_training_steps,
            last_epoch=last_epoch,
        )
        if self._is_rank_zero:
            log.info("Learning rate scheduler is initialized.")
        return lr_scheduler

    def _setup_data(
        self,
        cfg_dataset: DictConfig,
        shuffle: bool,
        batch_size: int,
    ) -> Tuple[DistributedSampler, DataLoader]:
        """
        All data related setup happens here. Currently this recipe only supports
        DistributedSampler with map-style datasets which fit into memory. Other samplers,
        iterable datasets and streaming datasets are not supported.
        """
        world_size, rank = utils.get_world_size_and_rank()
        ds = config.instantiate(cfg_dataset, tokenizer=self._tokenizer)
        sampler = DistributedSampler(
            ds, num_replicas=world_size, rank=rank, shuffle=shuffle, seed=0
        )

        dataloader = DataLoader(
            dataset=ds,
            batch_size=batch_size,
            sampler=sampler,
            collate_fn=partial(
                utils.padded_collate,
                padding_idx=self._tokenizer.pad_id,
                ignore_idx=self._loss_fn.ignore_index,
            ),
        )

        if self._is_rank_zero:
            log.info("Dataset and Sampler are initialized.")

        return sampler, dataloader

    def save_checkpoint(
        self,
        epoch: int,
    ) -> None:
        """
        Checkpoint the state of the recipe. The constructed checkpoint state dict
        contains the following information:
        - Merged weights with key MODEL_KEY
        - Adapter weights with key ADAPTER_KEY
        - Relevant recipe state if training is not complete

        The checkpointer will save the merged weights, adapter weights and recipe state in
        different checkpoint files. To correctly resume training, the adapter weights
        and recipe state must be provided along with the base model weights.
        """
        # final dict passed onto the checkpointer
        checkpoint_dict = {}

        intermediate_checkpoint = epoch + 1 < self.total_epochs
        # To prevent GPU memory from spiking during checkpoint save,
        # we consolidate the full model and optim state dicts on CPU for rank 0
        with FSDP.state_dict_type(
            self._model,
            StateDictType.FULL_STATE_DICT,
            FullStateDictConfig(offload_to_cpu=True, rank0_only=True),
            FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True),
        ):
            cpu_state_dict = self._model.state_dict()
            if intermediate_checkpoint:
                opt_state_dict = FSDP.optim_state_dict(self._model, self._optimizer)
            else:
                opt_state_dict = None

        # Now that we have the model and opt state dict, create the actual checkpoint dict
        # to be sent to the checkpointer and ultimately written to file
        if self._is_rank_zero:

            # Filter out the adapter keys and weights from the model state dict. These will
            # be saved separately
            adapter_key_filter = lambda x: x in self.adapter_params
            adapter_state_dict = {
                k: v for k, v in cpu_state_dict.items() if adapter_key_filter(k)
            }
            checkpoint_dict.update({utils.ADAPTER_KEY: adapter_state_dict})

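            # For reference, the merge below folds each low-rank update into its
            # frozen base weight. In standard LoRA terms:
            #     W_merged = W_base + (alpha / rank) * B @ A
            # so the merged checkpoint loads anywhere a plain base-model
            # checkpoint is expected, with no adapter code required.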
            # merge the adapter weights and base weights to create the model checkpoint
            merged_state_dict = get_merged_lora_ckpt(
                cpu_state_dict,
                rank=self._lora_rank,
                alpha=self._lora_alpha,
            )
            checkpoint_dict.update({utils.MODEL_KEY: merged_state_dict})

            # if training is in-progress, checkpoint the optimizer state and recipe state
            # as well.
            if intermediate_checkpoint:
                checkpoint_dict.update(
                    {
                        utils.OPT_KEY: opt_state_dict,
                        utils.SEED_KEY: self.seed,
                        utils.EPOCHS_KEY: self.epochs_run,
                        utils.TOTAL_EPOCHS_KEY: self.total_epochs,
                        utils.MAX_STEPS_KEY: self.max_steps_per_epoch,
                    }
                )

            self._checkpointer.save_checkpoint(
                checkpoint_dict,
                epoch=epoch,
                intermediate_checkpoint=intermediate_checkpoint,
            )

    def train(self) -> None:
        """
        The core training loop.
        """
        # clean up before training begins
        utils.cleanup_before_training()

        _, rank = utils.get_world_size_and_rank()

        # zero out the gradients before starting training
        self._optimizer.zero_grad()

        # self.epochs_run should be non-zero when we're resuming from a checkpoint
        for curr_epoch in range(self.epochs_run, self.total_epochs):

            # Update the sampler to ensure data is correctly shuffled across epochs
            # in case shuffle is True
            self._sampler.set_epoch(curr_epoch)

            for idx, batch in enumerate(
                pbar := tqdm(self._dataloader, disable=not (rank == 0))
            ):
                if (
                    self.max_steps_per_epoch is not None
                    and (idx // self._gradient_accumulation_steps)
                    == self.max_steps_per_epoch
                ):
                    break

                input_ids, labels = batch
                input_ids = input_ids.to(self._device)
                labels = labels.to(self._device)

                logits = self._model(input_ids)
                # Shift so that tokens < n predict n
                logits = logits[..., :-1, :].contiguous()
                labels = labels[..., 1:].contiguous()
                logits = logits.transpose(1, 2)
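                # Shapes at this point: the shift drops the last logit and the
                # first label so position t is scored against token t + 1
                # (next-token prediction), and the transpose gives logits of
                # shape [batch, vocab, seq_len - 1], the [N, C, d] layout
                # expected by losses like nn.CrossEntropyLoss for sequence
                # targets.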
                # Compute loss
                loss = self._loss_fn(logits, labels)

                if (
                    self.total_training_steps % self._log_every_n_steps == 0
                    and self._is_rank_zero
                ):
                    pbar.set_description(f"{curr_epoch+1}|{idx+1}|Loss: {loss.item()}")
                    self._metric_logger.log_dict(
                        {
                            "loss": loss.item(),
                            "lr": self._optimizer.param_groups[0]["lr"],
                            "gpu_resources": torch.cuda.memory_allocated(),
                        },
                        step=self.total_training_steps,  # Each step is unique, not limited to each epoch
                    )

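                # Scaling by gradient_accumulation_steps keeps gradients equal
                # to the average over the accumulated micro-batches: summing N
                # backward passes of (loss / N) matches one backward pass over
                # the combined batch (up to padding effects).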
                loss = loss / self._gradient_accumulation_steps
                loss.backward()

                if (idx + 1) % self._gradient_accumulation_steps == 0:
                    self._optimizer.step()
                    self._optimizer.zero_grad(set_to_none=True)
                    self._lr_scheduler.step()

                    # Update the number of steps when the weights are updated
                    self.total_training_steps += 1

                if (
                    self.total_training_steps % self._log_peak_memory_every_n_steps == 0
                    and self._is_rank_zero
                ):
                    # Log peak memory for iteration
                    memory_stats = utils.memory_stats_log(device=self._device)
                    self._metric_logger.log_dict(
                        memory_stats, step=self.total_training_steps
                    )

            self.epochs_run += 1
            self.save_checkpoint(epoch=curr_epoch)

    def cleanup(self) -> None:
        if self._is_rank_zero:
            self._metric_logger.close()
        destroy_process_group()


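# Illustrative launch command (the exact CLI shape may differ by torchtune
# version; the --nnodes/--nproc_per_node flags are the ones named in the error
# message below, while the recipe/config names are placeholders -- run
# ``tune ls`` for the real ones available in your install):
#
#   tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config <config>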
@config.parse
def recipe_main(cfg: DictConfig) -> None:
    """
    Entry point for the recipe.

    Configurable parameters are read in the following order:
        - Parameters specified in config (see available configs through ``tune ls``)
        - Overwritten by arguments from the command-line
    """
    if not utils.is_distributed():
        raise RuntimeError(
            "Distributed finetune recipe should be run via a distributed launcher. "
            "If using tune CLI, please specify --nnodes 1 and --nproc_per_node [num_gpus]"
        )

    init_process_group(backend="gloo" if cfg.device == "cpu" else "nccl")

    config.log_config(recipe_name="LoRAFinetuneRecipeDistributed", cfg=cfg)

    recipe = LoRAFinetuneRecipeDistributed(cfg=cfg)
    recipe.setup(cfg=cfg)
    recipe.train()
    recipe.cleanup()


if __name__ == "__main__":
    sys.exit(recipe_main())