# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from types import MethodType
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import torch
from transformers import Trainer
from typing_extensions import override

from ...extras.logging import get_logger
from ..callbacks import FixValueHeadModelCallback, PissaConvertCallback, SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler

if TYPE_CHECKING:
    from transformers import PreTrainedModel, ProcessorMixin
    from transformers.trainer import PredictionOutput

    from ...hparams import FinetuningArguments


logger = get_logger(__name__)

class PairwiseTrainer(Trainer):
    r"""
    Inherits Trainer to compute pairwise loss.
    """

    def __init__(
        self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.finetuning_args = finetuning_args
        self.can_return_loss = True  # override property to return eval_loss
        self.add_callback(FixValueHeadModelCallback)

        if processor is not None:
            self.add_callback(SaveProcessorCallback(processor))

        if finetuning_args.pissa_convert:
            self.add_callback(PissaConvertCallback)

        if finetuning_args.use_badam:
            from badam import BAdamCallback, clip_grad_norm_old_version
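            # Replace the accelerator's gradient clipping with BAdam's old-version implementation.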
            self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
            self.add_callback(BAdamCallback)

    @override
    def create_optimizer(self) -> "torch.optim.Optimizer":
        if self.optimizer is None:
            self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
        return super().create_optimizer()

    @override
    def create_scheduler(
        self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
    ) -> "torch.optim.lr_scheduler.LRScheduler":
        create_custom_scheduler(self.args, num_training_steps, optimizer)
        return super().create_scheduler(num_training_steps, optimizer)

    @override
    def compute_loss(
        self, model: "PreTrainedModel", inputs: Dict[str, "torch.Tensor"], return_outputs: bool = False
    ) -> Union["torch.Tensor", Tuple["torch.Tensor", List["torch.Tensor"]]]:
        r"""
        Computes pairwise loss. The first n examples in the batch are the chosen responses
        and the last n examples are the rejected ones.

        Subclass and override to inject custom behavior. Note that when a tuple is returned,
        its first element (the loss) is dropped by the caller, see:
        https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer.py#L3842

        A toy sketch of this loss appears at the bottom of this file.
        """
        _, _, values = model(**inputs, output_hidden_states=True, return_dict=True, use_cache=False)
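        # The batch stacks the chosen examples first and the rejected examples second along dim 0.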
        batch_size = inputs["input_ids"].size(0) // 2
        chosen_masks, rejected_masks = torch.split(inputs["attention_mask"], batch_size, dim=0)
        chosen_rewards, rejected_rewards = torch.split(values, batch_size, dim=0)
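        # Use the value predicted at the last non-padded token of each sequence as its scalar score.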
        chosen_scores = chosen_rewards.gather(dim=-1, index=(chosen_masks.sum(dim=-1, keepdim=True) - 1))
        rejected_scores = rejected_rewards.gather(dim=-1, index=(rejected_masks.sum(dim=-1, keepdim=True) - 1))
        chosen_scores, rejected_scores = chosen_scores.squeeze(), rejected_scores.squeeze()

        loss = -torch.nn.functional.logsigmoid(chosen_scores.float() - rejected_scores.float()).mean()
        if return_outputs:
            return loss, (loss, chosen_scores, rejected_scores)
        else:
            return loss

    def save_predictions(self, predict_results: "PredictionOutput") -> None:
        r"""
        Saves model predictions to `output_dir`.

        A custom behavior that is not contained in the base Trainer.
        """
        if not self.is_world_process_zero():
            return

        output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl")
        logger.info(f"Saving prediction results to {output_prediction_file}")
        chosen_scores, rejected_scores = predict_results.predictions

        with open(output_prediction_file, "w", encoding="utf-8") as writer:
            res: List[str] = []
            for c_score, r_score in zip(chosen_scores, rejected_scores):
                res.append(json.dumps({"chosen": round(float(c_score), 2), "rejected": round(float(r_score), 2)}))

            writer.write("\n".join(res))
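

# --------------------------------------------------------------------------- #
# Illustration only, not part of the trainer: a minimal sketch of the pairwise
# reward-model loss that `compute_loss` above applies to the chosen/rejected
# scores. The helper name `_demo_pairwise_loss` and the example values are
# hypothetical; copy the snippet into a standalone script (with `import torch`)
# to run it. It shows that -logsigmoid(chosen - rejected) shrinks as the chosen
# score moves further above the rejected score.
# --------------------------------------------------------------------------- #
def _demo_pairwise_loss(chosen: "torch.Tensor", rejected: "torch.Tensor") -> "torch.Tensor":
    # Bradley-Terry style objective: -log(sigmoid(r_chosen - r_rejected)), averaged over the batch.
    return -torch.nn.functional.logsigmoid(chosen.float() - rejected.float()).mean()


if __name__ == "__main__":
    print(_demo_pairwise_loss(torch.tensor([2.0, 1.5]), torch.tensor([-1.0, 0.0])))  # ~0.13 (well separated)
    print(_demo_pairwise_loss(torch.tensor([0.1, 0.2]), torch.tensor([0.0, 0.1])))  # ~0.64 (barely separated)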