Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Image-to-image pipeline using any AutoModelForImageToImage. This pipeline generates an image based on a previous image input.

Example:

    from PIL import Image
    import requests
    from transformers import pipeline

    upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
    img = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    img = img.resize((64, 64))
    upscaled_img = upscaler(img)
    img.size           # (64, 64)
    upscaled_img.size  # (144, 144)

This image-to-image pipeline can currently be loaded from pipeline() using the task identifier "image-to-image". See the list of available models at https://huggingface.co/models?filter=image-to-image.

Calling the pipeline transforms the image(s) passed as inputs.

Args:
    images (str, List[str], PIL.Image or List[PIL.Image]): The pipeline handles three types of images: a string containing an HTTP link pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly. The pipeline accepts either a single image or a batch of images, which must then be passed as a string. Images in a batch must all be in the same format: all as HTTP links, all as local paths, or all as PIL images.
    timeout (float, optional, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and the call may block forever.

Return:
    An image (PIL.Image) or a list of images (List[PIL.Image]) containing the result(s). If the input is a single image, the return will also be a single image; if the input is a list of several images, it will return a list of transformed images.
from typing import List, Union

import numpy as np

from ..utils import (
    add_end_docstrings,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToImagePipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        postprocess_params = {}
        forward_params = {}

        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]
        if "head_mask" in kwargs:
            forward_params["head_mask"] = kwargs["head_mask"]

        return preprocess_params, forward_params, postprocess_params

    def __call__(
        self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs
    ) -> Union["Image.Image", List["Image.Image"]]:
        return super().__call__(images, **kwargs)

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def preprocess(self, image, timeout=None):
        image = load_image(image, timeout=timeout)
        inputs = self.image_processor(images=[image], return_tensors="pt")
        return inputs

    def postprocess(self, model_outputs):
        images = []
        if "reconstruction" in model_outputs.keys():
            outputs = model_outputs.reconstruction
        for output in outputs:
            output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
            output = np.moveaxis(output, source=0, destination=-1)
            output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
            images.append(Image.fromarray(output))

        return images if len(images) > 1 else images[0]
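As a quick illustration of the batch behaviour described in the docstring above, here is a minimal usage sketch (assuming the caidas/swin2SR-classical-sr-x2-64 checkpoint from the example can be downloaded); passing a list of PIL images should return a list of upscaled images:

    from PIL import Image
    import requests
    from transformers import pipeline

    upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    img = Image.open(requests.get(url, stream=True).raw).resize((64, 64))

    # A single image returns a single PIL.Image, a list returns a list of PIL.Image.
    single = upscaler(img)
    batch = upscaler([img, img])
    print(single.size, len(batch))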
Image-to-text pipeline using an AutoModelForVision2Seq. This pipeline predicts a caption for a given image.

Example:

    from transformers import pipeline

    captioner = pipeline(model="ydshieh/vit-gpt2-coco-en")
    captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    # [{'generated_text': 'two birds are standing next to each other '}]

Learn more about the basics of using a pipeline in the pipeline tutorial. This image-to-text pipeline can currently be loaded from pipeline() using the task identifier "image-to-text". See the list of available models at https://huggingface.co/models?pipeline_tag=image-to-text.

Calling the pipeline assigns labels to the image(s) passed as inputs.

Args:
    images (str, List[str], PIL.Image or List[PIL.Image]): The pipeline handles three types of images: a string containing an HTTP(S) link pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly. The pipeline accepts either a single image or a batch of images.
    max_new_tokens (int, optional): The maximum number of tokens to generate. By default it will use the generate default.
    generate_kwargs (Dict, optional): Use this to send all of these arguments directly to generate, allowing full control of this function.
    timeout (float, optional, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever.

Return:
    A list or a list of lists of dict: each result comes as a dictionary with the following key:
    generated_text (str): The generated text.

Implementation notes (inline comments from the code):
- vision-encoder-decoder does not support conditional generation.
- The GIT model sets model_inputs["input_ids"] = None in preprocess when prompt=None. In batch mode, the pipeline will group them into a list of None, which fails _forward; avoid this by checking for it first.
- FIXME: we need to pop here due to a difference in how generation.py and generation.tf_utils.py parse inputs. In the TensorFlow version, generate raises an error if we don't use input_ids, whereas the PyTorch version matches it with self.model.main_input_name or self.model.encoder.main_input_name in the _prepare_model_inputs method.
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None, timeout=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if timeout is not None:
            preprocess_params["timeout"] = timeout

        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None, timeout=None):
        image = load_image(image, timeout=timeout)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
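A short usage sketch of the pipeline above, assuming the ydshieh/vit-gpt2-coco-en checkpoint from the docstring example; the max_new_tokens argument is used as documented, and the conditional prompt path is only available for certain model types:

    from transformers import pipeline

    captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
    url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"

    # Plain captioning, capping generation length via max_new_tokens.
    print(captioner(url, max_new_tokens=20))

    # Conditional generation with a text prompt is only supported by some model
    # types (e.g. "git" or "pix2struct"), so this call is model-dependent:
    # print(captioner(url, prompt="a photo of"))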
Automatic mask generation for images using SamForMaskGeneration. This pipeline predicts binary masks for an image, given an image. It is a ChunkPipeline because you can separate the points into mini-batches in order to avoid OOM issues. Use the points_per_batch argument to control the number of points that will be processed at the same time; the default is 64.

The pipeline works in 3 steps:
1. preprocess: a grid of 1024 evenly separated points is generated, along with bounding boxes and point labels. For more details on how the points and bounding boxes are created, check the _generate_crop_boxes function. The image is also preprocessed using the image_processor. This function yields a minibatch of points_per_batch.
2. forward: feeds the outputs of preprocess to the model. The image embedding is computed only once. It calls self.model.get_image_embeddings and makes sure that the gradients are not computed, and that the tensors and models are on the same device.
3. postprocess: the most important part of the automatic mask generation happens here. Three steps are involved:
   - image_processor.post_process_masks (run on each minibatch loop): takes in the raw output masks, resizes them according to the image size, and transforms them to binary masks.
   - image_processor.filter_masks (on each minibatch loop): uses both pred_iou_thresh and stability_scores, and also applies a variety of filters based on non-maximum suppression to remove bad masks.
   - image_processor.post_process_masks_for_amg: applies NMS on the masks to only keep relevant ones.

Arguments:
    model (PreTrainedModel or TFPreTrainedModel): The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from PreTrainedModel for PyTorch and TFPreTrainedModel for TensorFlow.
    tokenizer (PreTrainedTokenizer): The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from PreTrainedTokenizer.
    feature_extractor (SequenceFeatureExtractor): The feature extractor that will be used by the pipeline to encode the input.
    points_per_batch (int, optional, defaults to 64): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory.
    output_bboxes_mask (bool, optional, defaults to False): Whether or not to output the bounding box predictions.
    output_rle_masks (bool, optional, defaults to False): Whether or not to output the masks in RLE format.

Example:

    from transformers import pipeline

    generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
    outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
    outputs = generator("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", points_per_batch=128)

Learn more about the basics of using a pipeline in the pipeline tutorial. This segmentation pipeline can currently be loaded from pipeline() using the task identifier "mask-generation". See the list of available models at https://huggingface.co/models?filter=mask-generation.

Calling the pipeline generates binary segmentation masks.

Args:
    inputs (np.ndarray or bytes or str or dict): Image or list of images.
    mask_threshold (float, optional, defaults to 0.0): Threshold to use when turning the predicted masks into binary values.
    pred_iou_thresh (float, optional, defaults to 0.88): A filtering threshold in [0, 1] applied on the model's predicted mask quality.
    stability_score_thresh (float, optional, defaults to 0.95): A filtering threshold in [0, 1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions.
    stability_score_offset (int, optional, defaults to 1): The amount to shift the cutoff by when calculating the stability score.
    crops_nms_thresh (float, optional, defaults to 0.7): The box IoU cutoff used by non-maximal suppression to filter duplicate masks.
    crops_n_layers (int, optional, defaults to 0): If crops_n_layers > 0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops.
    crop_overlap_ratio (float, optional, defaults to 512/1500): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers, with more crops, scale down this overlap.
    crop_n_points_downscale_factor (int, optional, defaults to 1): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
    timeout (float, optional, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever.

Return:
    Dict: a dictionary with the following keys:
    mask (PIL.Image): A binary mask of the detected object as a PIL Image of shape (width, height) of the original image. Returns a mask filled with zeros if no object is found.
    score (optional float): Optionally, when the model is capable of estimating a confidence of the object described by the label and the mask.

Implementation note: post-processing happens here in order to avoid CPU/GPU copies of all the masks.
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING_NAMES

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING_NAMES)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        if "timeout" in kwargs:
            preprocess_kwargs["timeout"] = kwargs["timeout"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
        timeout: Optional[float] = None,
    ):
        image = load_image(image, timeout=timeout)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask

        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
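A minimal usage sketch of the mask-generation pipeline above, assuming the facebook/sam-vit-base checkpoint from the docstring example; the output keys follow the dictionary built in postprocess (masks and scores, plus optional rle_mask / bounding_boxes):

    from transformers import pipeline

    generator = pipeline(task="mask-generation", model="facebook/sam-vit-base")
    outputs = generator(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        points_per_batch=64,
        output_rle_mask=True,
    )
    # Number of kept masks after NMS, and the RLE encodings requested above.
    print(len(outputs["masks"]), len(outputs["rle_mask"]))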
Object detection pipeline using any AutoModelForObjectDetection. This pipeline predicts bounding boxes of objects and their classes.

Example:

    from transformers import pipeline

    detector = pipeline(model="facebook/detr-resnet-50")
    detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    # [{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}},
    #  {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}]
    # x, y are expressed relative to the top left hand corner.

Learn more about the basics of using a pipeline in the pipeline tutorial. This object detection pipeline can currently be loaded from pipeline() using the task identifier "object-detection". See the list of available models at https://huggingface.co/models?filter=object-detection.

Calling the pipeline detects objects (bounding boxes and classes) in the image(s) passed as inputs.

Args:
    images (str, List[str], PIL.Image or List[PIL.Image]): The pipeline handles three types of images: a string containing an HTTP(S) link pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly. The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images.
    threshold (float, optional, defaults to 0.9): The probability necessary to make a prediction.
    timeout (float, optional, defaults to None): The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever.

Return:
    A list of dictionaries or a list of lists of dictionaries containing the result. If the input is a single image, it will return a list of dictionaries; if the input is a list of several images, it will return a list of lists of dictionaries corresponding to each image. The dictionaries contain the following keys:
    label (str): The class label identified by the model.
    score (float): The score attributed by the model to that label.
    box (List[Dict[str, int]]): The bounding box of the detected object in the image's original size.

Implementation notes (inline comments and helper docstring from the code):
- One postprocess branch handles a LayoutLMForTokenClassification variant: the OCR got the boxes and the model classified the words.
- The other branch handles a regular ForObjectDetection model.
- _get_bounding_box turns a list [xmin, xmax, ymin, ymax] into a dict {"xmin": xmin, ...}. Args: box (torch.Tensor): Tensor containing the coordinates in corners format. Returns: bbox (Dict[str, int]): Dict containing the coordinates in corners format.
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import (
        MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    )

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return preprocess_params, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image, timeout=None):
        image = load_image(image, timeout=timeout)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
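A short usage sketch of the object-detection pipeline above, using the facebook/detr-resnet-50 checkpoint from the docstring example and the threshold argument documented there:

    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"

    for prediction in detector(url, threshold=0.9):
        box = prediction["box"]
        print(f"{prediction['label']}: {prediction['score']:.3f} at "
              f"({box['xmin']}, {box['ymin']}) - ({box['xmax']}, {box['ymax']})")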
PipelineIterator: roughly equivalent to

    for item in loader:
        yield infer(item, **params)

Arguments:
    loader (torch.utils.data.DataLoader or any iterator): The iterator that will be used to apply infer on.
    infer (any function): The function to apply to each element of loader.
    params (dict): The parameters passed to infer along with every item.
    loader_batch_size (int, optional): If specified, the items of loader are supposed to come as batches, and are loader_batched here, making it roughly behave as

        for items in loader:
            for i in loader_batch_size:
                item = items[i]
                yield infer(item, **params)

Inline comments from the code: if loader_batch_size == 1, let's spare some time by deactivating it altogether; internal bookkeeping; loader_batch_item returns the item located at loader_batch_index within the current loader_batch_data; if the batch data is a simple tensor, just fetch the slice; otherwise the batch data is assumed to be a BaseModelOutput (or dict), so convert the ModelOutput to a tuple first; hidden_states, past_key_values and attentions are stored as lists of tensors, so they need specific unbatching; None elements can happen for optional data that get passed around; take the correct batch data, but make it look like batch_size=1, for compatibility with other methods within transformers; a plain list typically needs no unsqueeze; recreate the element by reusing the original class to make it look like batch_size=1. In __next__: if we are currently unrolling a batch, we just need to return the current item within the batch; otherwise we're out of items within a batch, so fetch the next item, and we now have a batch of "inferred things"; try to infer the size of the batch (it could be the last batch, so we can't unroll as many elements); set the internal index to unwrap the batch; if we're not unrolling batches, just return the processed item.

PipelineChunkIterator: roughly equivalent to

    for iterator in loader:
        for item in iterator:
            yield infer(item, **params)

Arguments:
    loader (torch.utils.data.DataLoader or any iterator): The iterator that will be used to apply infer on.
    infer (any function): The function to apply to each element of loader.
    params (dict): The parameters passed to infer along with every item.

Inline comments from the code: a subiterator of None means we haven't started a preprocess iterator, so start it; try to return the next item; when a preprocess iterator ends, we can start looking at the next item; the chunk iterator will keep feeding until all elements of the iterator have created their subiterator and have been iterated against. Another way to look at it is that we're basically flattening lists of lists into a single list, but with generators.

PipelinePackIterator: roughly equivalent to

    packed = []
    for item in loader:
        packed.append(item)
        if item["is_last"]:
            yield packed
            packed = []

but it also handles cases where items are batched, meaning it's a dict of tensors with first dimension > 1. In that case it does

    packed = []
    for batch in loader:
        # item is batched
        for item in batch:
            packed.append(item)
            if item["is_last"]:
                yield packed
                packed = []

Arguments:
    loader (torch.utils.data.DataLoader or any iterator): The iterator that will be used to apply infer on.
    infer (any function): The function to apply to each element of loader.
    params (dict): The parameters passed to infer along with every item.
    loader_batch_size (int, optional): If specified, the items of loader are supposed to come as batches, and are loader_batched here, making it roughly behave as

        for items in loader:
            for i in loader_batch_size:
                item = items[i]
                yield infer(item, **params)

PipelinePackIterator is extremely similar to PipelineIterator in its unpacking mechanism, but we have an extra required item, which is the presence of is_last. That is because everything is flattened by PipelineChunkIterator; we need to keep track of how to regroup here, in the original process boundaries, so that process and postprocess see the same data. This iterator accumulates items (possibly while unbatching) until it hits an is_last and then just passes it on to the caller. (Inline comment: it could be the last batch, so we can't unroll as many elements.)
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    loader_batched[k] = element[self._loader_batch_index]
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            return self.loader_batch_item()

        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        if self.loader_batch_size is not None:
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                self.loader_batch_size = observed_batch_size
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            "Subiterator None means we haven't started a `preprocess` iterator. so start it"
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            processed = next(self.subiterator)
        except StopIteration:
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
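To make the loader_batch_size unrolling above concrete, here is a small illustrative sketch (a toy infer function, not anything from the library's documentation) that feeds batched tensors through PipelineIterator and gets them back one item at a time with a leading batch dimension of 1; the import path transformers.pipelines.pt_utils is assumed:

    import torch
    from transformers.pipelines.pt_utils import PipelineIterator

    def infer(batch, scale=1.0):
        # Pretend "inference": keeps the leading batch dimension of 4 intact.
        return {"logits": batch * scale}

    loader = [torch.ones(4, 2), torch.ones(4, 2)]  # two loader batches of size 4
    iterator = PipelineIterator(loader, infer, {"scale": 2.0}, loader_batch_size=4)

    for out in iterator:
        # Each yielded dict holds a single unrolled item, shaped (1, 2).
        print(out["logits"].shape)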
decode_spans: Take the output of any ModelForQuestionAnswering and generate probabilities for each span to be the actual answer. In addition, it filters out some unwanted/impossible cases, like an answer length greater than max_answer_len or an answer end position before the starting position. The method supports outputting the k best answers through the topk argument.

Args:
    start (np.ndarray): Individual start probabilities for each token.
    end (np.ndarray): Individual end probabilities for each token.
    topk (int): Indicates how many possible answer span(s) to extract from the model output.
    max_answer_len (int): Maximum size of the answer to extract from the model's output.
    undesired_tokens (np.ndarray): Mask determining tokens that can be part of the answer.

Inline comments: ensure we have a batch axis; compute the score of each tuple (start, end) to be the real answer; remove candidates with end < start and end - start > max_answer_len; inspired by Chen & al. (https://github.com/facebookresearch/DrQA).

select_starts_ends: Takes the raw output of any ModelForQuestionAnswering, first normalizes its outputs, and then uses decode_spans to generate probabilities for each span to be the actual answer.

Args:
    start (np.ndarray): Individual start logits for each token.
    end (np.ndarray): Individual end logits for each token.
    p_mask (np.ndarray): A mask with 1 for values that cannot be in the answer.
    attention_mask (np.ndarray): The attention mask generated by the tokenizer.
    min_null_score (float): The minimum null (empty) answer score seen so far.
    topk (int): Indicates how many possible answer span(s) to extract from the model output.
    handle_impossible_answer (bool): Whether to allow null (empty) answers.
    max_answer_len (int): Maximum size of the answer to extract from the model's output.

Inline comments: ensure padded tokens and question tokens cannot belong to the set of candidate answers; generate the mask; make sure non-context indexes in the tensor cannot contribute to the softmax; normalize logits and spans to retrieve the answer; mask CLS.

QuestionAnsweringArgumentHandler: QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question and context) to be mapped to internal SquadExample. QuestionAnsweringArgumentHandler manages all the possible ways to create a SquadExample from the command-line supplied arguments. Inline comments: detect where the actual inputs are; generic compatibility with sklearn and Keras; batched data; when the user is sending a generator we need to trust it's a valid example; normalize inputs; copy to avoid overriding arguments.

QuestionAnsweringPipeline: Question answering pipeline using any ModelForQuestionAnswering. See the question answering examples in the task summary for more information.

Example:

    from transformers import pipeline

    oracle = pipeline(model="deepset/roberta-base-squad2")
    oracle(question="Where do I live?", context="My name is Wolfgang and I live in Berlin")
    # {'score': 0.9191, 'start': 34, 'end': 40, 'answer': 'Berlin'}

Learn more about the basics of using a pipeline in the pipeline tutorial. This question answering pipeline can currently be loaded from pipeline() using the task identifier "question-answering". The models that this pipeline can use are models that have been fine-tuned on a question answering task. See the up-to-date list of available models at https://huggingface.co/models?filter=question-answering.

create_sample: QuestionAnsweringPipeline leverages the SquadExample internally. This helper method encapsulates all the logic for converting question(s) and context(s) to SquadExample. We currently support extractive question answering.

Arguments:
    question (str or List[str]): The question(s) asked.
    context (str or List[str]): The context(s) in which we will look for the answer.

Returns:
    One or a list of SquadExample: The corresponding SquadExample grouping question and context.

Inline comment: set default values.

Calling the pipeline answers the question(s) given as inputs by using the context(s).

Args:
    args (SquadExample or a list of SquadExample): One or several SquadExample containing the question and context.
    X (SquadExample or a list of SquadExample, optional): One or several SquadExample containing the question and context (will be treated the same way as if passed as the first positional argument).
    data (SquadExample or a list of SquadExample, optional): One or several SquadExample containing the question and context (will be treated the same way as if passed as the first positional argument).
    question (str or List[str]): One or several question(s) (must be used in conjunction with the context argument).
    context (str or List[str]): One or several context(s) associated with the question(s) (must be used in conjunction with the question argument).
    topk (int, optional, defaults to 1): The number of answers to return (will be chosen by order of likelihood). Note that we return less than topk answers if there are not enough options available within the context.
    doc_stride (int, optional, defaults to 128): If the context is too long to fit with the question for the model, it will be split into several chunks with some overlap. This argument controls the size of that overlap.
    max_answer_len (int, optional, defaults to 15): The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
    max_seq_len (int, optional, defaults to 384): The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split into several chunks (using doc_stride as overlap) if needed.
    max_question_len (int, optional, defaults to 64): The maximum length of the question after tokenization. It will be truncated if needed.
    handle_impossible_answer (bool, optional, defaults to False): Whether or not we accept impossible as an answer.
    align_to_words (bool, optional, defaults to True): Attempts to align the answer to real words. Improves quality on space-separated languages. Might hurt on non-space-separated languages (like Japanese or Chinese).

Return:
    A dict or a list of dict: each result comes as a dictionary with the following keys:
    score (float): The probability associated to the answer.
    start (int): The character start index of the answer (in the tokenized version of the input).
    end (int): The character end index of the answer (in the tokenized version of the input).
    answer (str): The answer to the question.

Inline comments from the implementation: convert inputs to features; XXX: this is special, args_parser will not handle anything generator or dataset like, and for those we expect the user to send a simple valid example either directly as a SquadExample or a simple dict, so we still need a little sanitation here; define the side we want to truncate / pad and the text/pair sorting; when the input is too long, it's converted into a batch of inputs with overflowing tokens and a stride of overlap between the inputs; if a batch of inputs is given, a special output "overflow_to_sample_mapping" indicates which member of the encoded batch belongs to which original batch sample (here we tokenize examples one by one so we don't need to use "overflow_to_sample_mapping"); "num_span" is the number of output samples generated from the overflowing tokens; p_mask is a mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer); we put 0 on the tokens from the context and 1 everywhere else (question and special tokens); keep the cls_token unmasked (some models use it to indicate unanswerable questions); we don't use the rest of the values, and actually, for a Fast tokenizer we could totally avoid using SquadFeatures and SquadExample; XXXForSequenceClassification models should not use use_cache=True even if it's supported; convert the answer (tokens) back to the original text, where score is the score from the model, start is the index of the first character of the answer in the context string, end is the index of the character following the last character of the answer in the context string, and answer is the plain text of the answer; the encoding was not padded, input_ids might... it doesn't make a difference unless we're padding on the left hand side, since now we have different offsets everywhere; sometimes the max probability token is in the middle of a word, so we start by finding the right word containing the token with token_to_word, then we convert this word into a character span with word_to_chars; some tokenizers don't really handle words, so keep to offsets then.

span_to_answer: When decoding from token probabilities, this method maps token indexes to actual words in the initial context.

Args:
    text (str): The actual context to extract the answer from.
    start (int): The answer starting token index.
    end (int): The answer end token index.

Returns:
    Dictionary like {'answer': str, 'start': int, 'end': int}.

Inline comments: append words if they are in the span; stop if we went over the end of the answer; append the subtokenization length to the running index; join text with spaces.
squadexample questionansweringargumenthandler manages all the possible to create a squadexample from the command line supplied arguments detect where the actual inputs are generic compatibility with sklearn and keras batched data when user is sending a generator we need to trust it s a valid example normalize inputs copy to avoid overriding arguments question answering pipeline using any modelforquestionanswering see the question answering examples task_summary question answering for more information example python from transformers import pipeline oracle pipeline model deepset roberta base squad2 oracle question where do i live context my name is wolfgang and i live in berlin score 0 9191 start 34 end 40 answer berlin learn more about the basics of using a pipeline in the pipeline tutorial pipeline_tutorial this question answering pipeline can currently be loaded from pipeline using the following task identifier question answering the models that this pipeline can use are models that have been fine tuned on a question answering task see the up to date list of available models on huggingface co models https huggingface co models filter question answering questionansweringpipeline leverages the squadexample internally this helper method encapsulate all the logic for converting question s and context s to squadexample we currently support extractive question answering arguments question str or list str the question s asked context str or list str the context s in which we will look for the answer returns one or a list of squadexample the corresponding squadexample grouping question and context set defaults values answer the question s given as inputs by using the context s args args squadexample or a list of squadexample one or several squadexample containing the question and context x squadexample or a list of squadexample optional one or several squadexample containing the question and context will be treated the same way as if passed as the first positional argument data squadexample or a list of squadexample optional one or several squadexample containing the question and context will be treated the same way as if passed as the first positional argument question str or list str one or several question s must be used in conjunction with the context argument context str or list str one or several context s associated with the question s must be used in conjunction with the question argument topk int optional defaults to 1 the number of answers to return will be chosen by order of likelihood note that we return less than topk answers if there are not enough options available within the context doc_stride int optional defaults to 128 if the context is too long to fit with the question for the model it will be split in several chunks with some overlap this argument controls the size of that overlap max_answer_len int optional defaults to 15 the maximum length of predicted answers e g only answers with a shorter length are considered max_seq_len int optional defaults to 384 the maximum length of the total sentence context question in tokens of each chunk passed to the model the context will be split in several chunks using doc_stride as overlap if needed max_question_len int optional defaults to 64 the maximum length of the question after tokenization it will be truncated if needed handle_impossible_answer bool optional defaults to false whether or not we accept impossible as an answer align_to_words bool optional defaults to true attempts to align the answer to real words improves quality on 
space separated langages might hurt on non space separated languages like japanese or chinese return a dict or a list of dict each result comes as a dictionary with the following keys score float the probability associated to the answer start int the character start index of the answer in the tokenized version of the input end int the character end index of the answer in the tokenized version of the input answer str the answer to the question convert inputs to features xxx this is specal args_parser will not handle anything generator or dataset like for those we expect user to send a simple valid example either directly as a squadexample or simple dict so we still need a little sanitation here define the side we want to truncate pad and the text pair sorting when the input is too long it s converted in a batch of inputs with overflowing tokens and a stride of overlap between the inputs if a batch of inputs is given a special output overflow_to_sample_mapping indicate which member of the encoded batch belong to which original batch sample here we tokenize examples one by one so we don t need to use overflow_to_sample_mapping num_span is the number of output samples generated from the overflowing tokens p_mask mask with 1 for token than cannot be in the answer 0 for token which can be in an answer we put 0 on the tokens from the context and 1 everywhere else question and special tokens keep the cls_token unmasked some models use it to indicate unanswerable questions we don t use the rest of the values and actually for fast tokenizer we could totally avoid using squadfeatures and squadexample xxxforsequenceclassification models should not use use_cache true even if it s supported large and positive convert the answer tokens back to the original text score score from the model start index of the first character of the answer in the context string end index of the character following the last character of the answer in the context string answer plain text of the answer convert the answer tokens back to the original text score score from the model start index of the first character of the answer in the context string end index of the character following the last character of the answer in the context string answer plain text of the answer encoding was not padded input_ids might it doesn t make a difference unless we re padding on the left hand side since now we have different offsets everywhere sometimes the max probability token is in the middle of a word so we start by finding the right word containing the token with token_to_word then we convert this word in a character span with word_to_chars some tokenizers don t really handle words keep to offsets then when decoding from token probabilities this method maps token indexes to actual word in the initial context args text str the actual context to extract the answer from start int the answer starting token index end int the answer end token index returns dictionary like answer str start int end int append words if they are in the span stop if we went over the end of the answer append the subtokenization length to the running index join text with spaces
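To make the span-decoding logic above concrete, here is a small NumPy sketch; the probabilities are toy values (not real model output) and the snippet mirrors, but is not, the library's decode_spans function:

import numpy as np

start = np.array([0.1, 0.6, 0.2, 0.1])  # P(token i is the answer start)
end = np.array([0.1, 0.1, 0.7, 0.1])    # P(token j is the answer end)
max_answer_len = 2

# Score every (start, end) pair as start[i] * end[j].
outer = np.outer(start, end)
# Keep only spans with end >= start and length <= max_answer_len.
candidates = np.tril(np.triu(outer), max_answer_len - 1)

best = np.unravel_index(np.argmax(candidates), candidates.shape)
print(best, candidates[best])  # (1, 2) 0.42 -> tokens 1..2 form the best span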
import inspect import types import warnings from collections.abc import Iterable from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union import numpy as np from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features from ..modelcard import ModelCard from ..tokenization_utils import PreTrainedTokenizer from ..utils import ( PaddingStrategy, add_end_docstrings, is_tf_available, is_tokenizers_available, is_torch_available, logging, ) from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline logger = logging.get_logger(__name__) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel if is_tokenizers_available(): import tokenizers if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES Dataset = None if is_torch_available(): import torch from torch.utils.data import Dataset from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES def decode_spans( start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray ) -> Tuple: if start.ndim == 1: start = start[None] if end.ndim == 1: end = end[None] outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) candidates = np.tril(np.triu(outer), max_answer_len - 1) scores_flat = candidates.flatten() if topk == 1: idx_sort = [np.argmax(scores_flat)] elif len(scores_flat) < topk: idx_sort = np.argsort(-scores_flat) else: idx = np.argpartition(-scores_flat, topk)[0:topk] idx_sort = idx[np.argsort(-scores_flat[idx])] starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) starts = starts[desired_spans] ends = ends[desired_spans] scores = candidates[0, starts, ends] return starts, ends, scores def select_starts_ends( start, end, p_mask, attention_mask, min_null_score=1000000, top_k=1, handle_impossible_answer=False, max_answer_len=15, ): undesired_tokens = np.abs(np.array(p_mask) - 1) if attention_mask is not None: undesired_tokens = undesired_tokens & attention_mask undesired_tokens_mask = undesired_tokens == 0.0 start = np.where(undesired_tokens_mask, -10000.0, start) end = np.where(undesired_tokens_mask, -10000.0, end) start = np.exp(start - start.max(axis=-1, keepdims=True)) start = start / start.sum() end = np.exp(end - end.max(axis=-1, keepdims=True)) end = end / end.sum() if handle_impossible_answer: min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item()) start[0, 0] = end[0, 0] = 0.0 starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens) return starts, ends, scores, min_null_score class QuestionAnsweringArgumentHandler(ArgumentHandler): def normalize(self, item): if isinstance(item, SquadExample): return item elif isinstance(item, dict): for k in ["question", "context"]: if k not in item: raise KeyError("You need to provide a dictionary with keys {question:..., context:...}") elif item[k] is None: raise ValueError(f"`{k}` cannot be None") elif isinstance(item[k], str) and len(item[k]) == 0: raise ValueError(f"`{k}` cannot be empty") return QuestionAnsweringPipeline.create_sample(**item) raise ValueError(f"{item} argument needs to be of type (SquadExample, dict)") def __call__(self, *args, **kwargs): if args is not None and len(args) > 0: if len(args) == 1: inputs = args[0] elif len(args) == 2 and {type(el) for el in args} == {str}: inputs = 
[{"question": args[0], "context": args[1]}] else: inputs = list(args) elif "X" in kwargs: inputs = kwargs["X"] elif "data" in kwargs: inputs = kwargs["data"] elif "question" in kwargs and "context" in kwargs: if isinstance(kwargs["question"], list) and isinstance(kwargs["context"], str): inputs = [{"question": Q, "context": kwargs["context"]} for Q in kwargs["question"]] elif isinstance(kwargs["question"], list) and isinstance(kwargs["context"], list): if len(kwargs["question"]) != len(kwargs["context"]): raise ValueError("Questions and contexts don't have the same lengths") inputs = [{"question": Q, "context": C} for Q, C in zip(kwargs["question"], kwargs["context"])] elif isinstance(kwargs["question"], str) and isinstance(kwargs["context"], str): inputs = [{"question": kwargs["question"], "context": kwargs["context"]}] else: raise ValueError("Arguments can't be understood") else: raise ValueError(f"Unknown arguments {kwargs}") generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,) if isinstance(inputs, generator_types): return inputs if isinstance(inputs, dict): inputs = [inputs] elif isinstance(inputs, Iterable): inputs = list(inputs) else: raise ValueError(f"Invalid arguments {kwargs}") for i, item in enumerate(inputs): inputs[i] = self.normalize(item) return inputs @add_end_docstrings(PIPELINE_INIT_ARGS) class QuestionAnsweringPipeline(ChunkPipeline): default_input_names = "question,context" handle_impossible_answer = False def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: PreTrainedTokenizer, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, task: str = "", **kwargs, ): super().__init__( model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs, ) self._args_parser = QuestionAnsweringArgumentHandler() self.check_model_type( TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) @staticmethod def create_sample( question: Union[str, List[str]], context: Union[str, List[str]] ) -> Union[SquadExample, List[SquadExample]]: if isinstance(question, list): return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)] else: return SquadExample(None, question, context, None, None, None) def _sanitize_parameters( self, padding=None, topk=None, top_k=None, doc_stride=None, max_answer_len=None, max_seq_len=None, max_question_len=None, handle_impossible_answer=None, align_to_words=None, **kwargs, ): preprocess_params = {} if padding is not None: preprocess_params["padding"] = padding if doc_stride is not None: preprocess_params["doc_stride"] = doc_stride if max_question_len is not None: preprocess_params["max_question_len"] = max_question_len if max_seq_len is not None: preprocess_params["max_seq_len"] = max_seq_len postprocess_params = {} if topk is not None and top_k is None: warnings.warn("topk parameter is deprecated, use top_k instead", UserWarning) top_k = topk if top_k is not None: if top_k < 1: raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") postprocess_params["top_k"] = top_k if max_answer_len is not None: if max_answer_len < 1: raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") if max_answer_len is not None: postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer if align_to_words is not None: 
postprocess_params["align_to_words"] = align_to_words return preprocess_params, {}, postprocess_params def __call__(self, *args, **kwargs): examples = self._args_parser(*args, **kwargs) if isinstance(examples, (list, tuple)) and len(examples) == 1: return super().__call__(examples[0], **kwargs) return super().__call__(examples, **kwargs) def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None): if isinstance(example, dict): example = SquadExample(None, example["question"], example["context"], None, None, None) if max_seq_len is None: max_seq_len = min(self.tokenizer.model_max_length, 384) if doc_stride is None: doc_stride = min(max_seq_len // 2, 128) if doc_stride > max_seq_len: raise ValueError(f"`doc_stride` ({doc_stride}) is larger than `max_seq_len` ({max_seq_len})") if not self.tokenizer.is_fast: features = squad_convert_examples_to_features( examples=[example], tokenizer=self.tokenizer, max_seq_length=max_seq_len, doc_stride=doc_stride, max_query_length=max_question_len, padding_strategy=PaddingStrategy.MAX_LENGTH, is_training=False, tqdm_enabled=False, ) else: question_first = self.tokenizer.padding_side == "right" encoded_inputs = self.tokenizer( text=example.question_text if question_first else example.context_text, text_pair=example.context_text if question_first else example.question_text, padding=padding, truncation="only_second" if question_first else "only_first", max_length=max_seq_len, stride=doc_stride, return_token_type_ids=True, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, ) num_spans = len(encoded_inputs["input_ids"]) p_mask = [ [tok != 1 if question_first else 0 for tok in encoded_inputs.sequence_ids(span_id)] for span_id in range(num_spans) ] features = [] for span_idx in range(num_spans): input_ids_span_idx = encoded_inputs["input_ids"][span_idx] attention_mask_span_idx = ( encoded_inputs["attention_mask"][span_idx] if "attention_mask" in encoded_inputs else None ) token_type_ids_span_idx = ( encoded_inputs["token_type_ids"][span_idx] if "token_type_ids" in encoded_inputs else None ) if self.tokenizer.cls_token_id is not None: cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] for cls_index in cls_indices: p_mask[span_idx][cls_index] = 0 submask = p_mask[span_idx] features.append( SquadFeatures( input_ids=input_ids_span_idx, attention_mask=attention_mask_span_idx, token_type_ids=token_type_ids_span_idx, p_mask=submask, encoding=encoded_inputs[span_idx], cls_index=None, token_to_orig_map={}, example_index=0, unique_id=0, paragraph_len=0, token_is_max_context=0, tokens=[], start_position=0, end_position=0, is_impossible=False, qas_id=None, ) ) for i, feature in enumerate(features): fw_args = {} others = {} model_input_names = self.tokenizer.model_input_names + ["p_mask", "token_type_ids"] for k, v in feature.__dict__.items(): if k in model_input_names: if self.framework == "tf": tensor = tf.constant(v) if tensor.dtype == tf.int64: tensor = tf.cast(tensor, tf.int32) fw_args[k] = tf.expand_dims(tensor, 0) elif self.framework == "pt": tensor = torch.tensor(v) if tensor.dtype == torch.int32: tensor = tensor.long() fw_args[k] = tensor.unsqueeze(0) else: others[k] = v is_last = i == len(features) - 1 yield {"example": example, "is_last": is_last, **fw_args, **others} def _forward(self, inputs): example = inputs["example"] model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names} model_forward = self.model.forward if 
self.framework == "pt" else self.model.call if "use_cache" in inspect.signature(model_forward).parameters.keys(): model_inputs["use_cache"] = False output = self.model(**model_inputs) if isinstance(output, dict): return {"start": output["start_logits"], "end": output["end_logits"], "example": example, **inputs} else: start, end = output[:2] return {"start": start, "end": end, "example": example, **inputs} def postprocess( self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, align_to_words=True, ): min_null_score = 1000000 answers = [] for output in model_outputs: start_ = output["start"] end_ = output["end"] example = output["example"] p_mask = output["p_mask"] attention_mask = ( output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None ) starts, ends, scores, min_null_score = select_starts_ends( start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len ) if not self.tokenizer.is_fast: char_to_word = np.array(example.char_to_word_offset) for s, e, score in zip(starts, ends, scores): token_to_orig_map = output["token_to_orig_map"] answers.append( { "score": score.item(), "start": np.where(char_to_word == token_to_orig_map[s])[0][0].item(), "end": np.where(char_to_word == token_to_orig_map[e])[0][-1].item(), "answer": " ".join(example.doc_tokens[token_to_orig_map[s] : token_to_orig_map[e] + 1]), } ) else: question_first = bool(self.tokenizer.padding_side == "right") enc = output["encoding"] if self.tokenizer.padding_side == "left": offset = (output["input_ids"] == self.tokenizer.pad_token_id).numpy().sum() else: offset = 0 sequence_index = 1 if question_first else 0 for s, e, score in zip(starts, ends, scores): s = s - offset e = e - offset start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words) answers.append( { "score": score.item(), "start": start_index, "end": end_index, "answer": example.context_text[start_index:end_index], } ) if handle_impossible_answer: answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""}) answers = sorted(answers, key=lambda x: x["score"], reverse=True)[:top_k] if len(answers) == 1: return answers[0] return answers def get_indices( self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool ) -> Tuple[int, int]: if align_to_words: try: start_word = enc.token_to_word(s) end_word = enc.token_to_word(e) start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0] end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1] except Exception: start_index = enc.offsets[s][0] end_index = enc.offsets[e][1] else: start_index = enc.offsets[s][0] end_index = enc.offsets[e][1] return start_index, end_index def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]: words = [] token_idx = char_start_idx = char_end_idx = chars_idx = 0 for i, word in enumerate(text.split(" ")): token = self.tokenizer.tokenize(word) if start <= token_idx <= end: if token_idx == start: char_start_idx = chars_idx if token_idx == end: char_end_idx = chars_idx + len(word) words += [word] if token_idx > end: break token_idx += len(token) chars_idx += len(word) + 1 return { "answer": " ".join(words), "start": max(0, char_start_idx), "end": min(len(text), char_end_idx), }
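A minimal usage sketch of the pipeline implemented above; the checkpoint is the one from the docstring example, top_k and handle_impossible_answer are the documented call parameters, and the printed output is indicative only (exact scores depend on the checkpoint):

from transformers import pipeline

qa = pipeline("question-answering", model="deepset/roberta-base-squad2")

# Single question/context pair; top_k > 1 returns several candidate spans,
# and handle_impossible_answer=True also scores the empty answer.
result = qa(
    question="Where do I live?",
    context="My name is Wolfgang and I live in Berlin.",
    top_k=2,
    handle_impossible_answer=True,
)
print(result)  # e.g. [{'score': ..., 'start': 34, 'end': 40, 'answer': 'Berlin'}, ...]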
TableQuestionAnsweringArgumentHandler: Handles arguments for the TableQuestionAnsweringPipeline. Returns tqa_pipeline_inputs of shape {"table": pd.DataFrame, "query": List[str]} or {"table": pd.DataFrame, "query": str}.

TableQuestionAnsweringPipeline: Table question answering pipeline using a ModelForTableQuestionAnswering. This pipeline is only available in PyTorch. Example:

python
from transformers import pipeline

oracle = pipeline(model="google/tapas-base-finetuned-wtq")
table = {
    "Repository": ["Transformers", "Datasets", "Tokenizers"],
    "Stars": ["36542", "4512", "3934"],
    "Contributors": ["651", "77", "34"],
    "Programming language": ["Python", "Python", "Rust, Python and NodeJS"],
}
oracle(query="How many stars does the transformers repository have?", table=table)
{'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'}

Learn more about the basics of using a pipeline in the pipeline tutorial (pipeline_tutorial). This tabular question answering pipeline can currently be loaded from pipeline() using the following task identifier: "table-question-answering". The models that this pipeline can use are models that have been fine-tuned on a tabular question answering task. See the up-to-date list of available models on huggingface.co/models (https://huggingface.co/models?filter=table-question-answering).

batch_inference / sequential_inference: Sequential inference is used for models that need to process sequences in a sequential fashion, like the SQA models, which handle conversational queries related to a table. If sequences have already been processed, the token type ids are created according to the previous answer (the per-example tensors involved have shape (seq_len,) or (seq_len, 7)).

__call__: Answers queries according to a table. The pipeline accepts several types of inputs, detailed below: pipeline(table, query), pipeline(table, [query]), pipeline(table=table, query=query), pipeline(table=table, query=[query]), pipeline({"table": table, "query": query}), pipeline({"table": table, "query": [query]}), pipeline([{"table": table, "query": query}, {"table": table, "query": query}]). The table argument should be a dict, or a DataFrame built from that dict, containing the whole table. Example:

python
data = {
    "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
    "age": ["56", "45", "59"],
    "number of movies": ["87", "53", "69"],
    "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
}

This dictionary can be passed in as such, or can be converted to a pandas DataFrame:

python
import pandas as pd

table = pd.DataFrame.from_dict(data)

Args: table (pd.DataFrame or Dict): pandas DataFrame or dictionary that will be converted to a DataFrame containing all the table values (see above for an example of dictionary). query (str or List[str]): query or list of queries that will be sent to the model alongside the table. sequential (bool, optional, defaults to False): whether to do inference sequentially or as a batch; batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature. padding (bool, str or utils.PaddingStrategy, optional, defaults to False): activates and controls padding; accepts the following values: True or "longest" (pad to the longest sequence in the batch, or no padding if only a single sequence is provided), "max_length" (pad to a maximum length specified with the argument max_length, or to the maximum acceptable input length for the model if that argument is not provided), False or "do_not_pad" (default; no padding, i.e. the output batch can have sequences of different lengths). truncation (bool, str or TapasTruncationStrategy, optional, defaults to False): activates and controls truncation; accepts the following values: True or "drop_rows_to_fit" (truncate to a maximum length specified with the argument max_length, or to the maximum acceptable input length for the model if that argument is not provided; this truncates row by row, removing rows from the table), False or "do_not_truncate" (default; no truncation, i.e. the output batch can have sequence lengths greater than the model's maximum admissible input size). Return: a dictionary or a list of dictionaries containing results; each result is a dictionary with the following keys: answer (str): the answer of the query given the table; if there is an aggregator, the answer is preceded by "AGGREGATOR >". coordinates (List[Tuple[int, int]]): coordinates of the cells of the answers. cells (List[str]): list of strings made up of the answer cell values. aggregator (str): if the model has an aggregator, this returns the aggregator.
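The docstring example above, written out as a runnable sketch (PyTorch and the pandas backend are required; the expected answer is the one quoted in the docstring and may vary with checkpoint versions):

import pandas as pd
from transformers import pipeline

oracle = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")

table = pd.DataFrame.from_dict(
    {
        "Repository": ["Transformers", "Datasets", "Tokenizers"],
        "Stars": ["36542", "4512", "3934"],
    }
)
print(oracle(table=table, query="How many stars does the transformers repository have?"))
# {'answer': 'AVERAGE > 36542', 'coordinates': [(0, 1)], 'cells': ['36542'], 'aggregator': 'AVERAGE'}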
import collections import types import numpy as np from ..utils import ( add_end_docstrings, is_tensorflow_probability_available, is_tf_available, is_torch_available, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Dataset, Pipeline, PipelineException if is_torch_available(): import torch from ..models.auto.modeling_auto import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, ) if is_tf_available() and is_tensorflow_probability_available(): import tensorflow as tf import tensorflow_probability as tfp from ..models.auto.modeling_tf_auto import ( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, ) class TableQuestionAnsweringArgumentHandler(ArgumentHandler): def __call__(self, table=None, query=None, **kwargs): requires_backends(self, "pandas") import pandas as pd if table is None: raise ValueError("Keyword argument `table` cannot be None.") elif query is None: if isinstance(table, dict) and table.get("query") is not None and table.get("table") is not None: tqa_pipeline_inputs = [table] elif isinstance(table, list) and len(table) > 0: if not all(isinstance(d, dict) for d in table): raise ValueError( f"Keyword argument `table` should be a list of dict, but is {(type(d) for d in table)}" ) if table[0].get("query") is not None and table[0].get("table") is not None: tqa_pipeline_inputs = table else: raise ValueError( "If keyword argument `table` is a list of dictionaries, each dictionary should have a `table`" f" and `query` key, but only dictionary has keys {table[0].keys()} `table` and `query` keys." ) elif Dataset is not None and isinstance(table, Dataset) or isinstance(table, types.GeneratorType): return table else: raise ValueError( "Invalid input. 
Keyword argument `table` should be either of type `dict` or `list`, but " f"is {type(table)})" ) else: tqa_pipeline_inputs = [{"table": table, "query": query}] for tqa_pipeline_input in tqa_pipeline_inputs: if not isinstance(tqa_pipeline_input["table"], pd.DataFrame): if tqa_pipeline_input["table"] is None: raise ValueError("Table cannot be None.") tqa_pipeline_input["table"] = pd.DataFrame(tqa_pipeline_input["table"]) return tqa_pipeline_inputs @add_end_docstrings(PIPELINE_INIT_ARGS) class TableQuestionAnsweringPipeline(Pipeline): default_input_names = "table,query" def __init__(self, args_parser=TableQuestionAnsweringArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self._args_parser = args_parser if self.framework == "tf": mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() mapping.update(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) else: mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES.copy() mapping.update(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES) self.check_model_type(mapping) self.aggregate = bool(getattr(self.model.config, "aggregation_labels", None)) and bool( getattr(self.model.config, "num_aggregation_labels", None) ) self.type = "tapas" if hasattr(self.model.config, "aggregation_labels") else None def batch_inference(self, **inputs): return self.model(**inputs) def sequential_inference(self, **inputs): if self.framework == "pt": all_logits = [] all_aggregations = [] prev_answers = None batch_size = inputs["input_ids"].shape[0] input_ids = inputs["input_ids"].to(self.device) attention_mask = inputs["attention_mask"].to(self.device) token_type_ids = inputs["token_type_ids"].to(self.device) token_type_ids_example = None for index in range(batch_size): if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] model_labels = np.zeros_like(prev_labels_example.cpu().numpy()) token_type_ids_example = token_type_ids[index] for i in range(model_labels.shape[0]): segment_id = token_type_ids_example[:, 0].tolist()[i] col_id = token_type_ids_example[:, 1].tolist()[i] - 1 row_id = token_type_ids_example[:, 2].tolist()[i] - 1 if row_id >= 0 and col_id >= 0 and segment_id == 1: model_labels[i] = int(prev_answers[(col_id, row_id)]) token_type_ids_example[:, 3] = torch.from_numpy(model_labels).type(torch.long).to(self.device) input_ids_example = input_ids[index] attention_mask_example = attention_mask[index] token_type_ids_example = token_type_ids[index] outputs = self.model( input_ids=input_ids_example.unsqueeze(0), attention_mask=attention_mask_example.unsqueeze(0), token_type_ids=token_type_ids_example.unsqueeze(0), ) logits = outputs.logits if self.aggregate: all_aggregations.append(outputs.logits_aggregation) all_logits.append(logits) dist_per_token = torch.distributions.Bernoulli(logits=logits) probabilities = dist_per_token.probs * attention_mask_example.type(torch.float32).to( dist_per_token.probs.device ) coords_to_probs = collections.defaultdict(list) for i, p in enumerate(probabilities.squeeze().tolist()): segment_id = token_type_ids_example[:, 0].tolist()[i] col = token_type_ids_example[:, 1].tolist()[i] - 1 row = token_type_ids_example[:, 2].tolist()[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: coords_to_probs[(col, row)].append(p) prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} logits_batch = torch.cat(tuple(all_logits), 0) return (logits_batch,) if not self.aggregate else (logits_batch, torch.cat(tuple(all_aggregations), 0)) else: all_logits = [] 
all_aggregations = [] prev_answers = None batch_size = inputs["input_ids"].shape[0] input_ids = inputs["input_ids"] attention_mask = inputs["attention_mask"] token_type_ids = inputs["token_type_ids"].numpy() token_type_ids_example = None for index in range(batch_size): if prev_answers is not None: prev_labels_example = token_type_ids_example[:, 3] model_labels = np.zeros_like(prev_labels_example, dtype=np.int32) token_type_ids_example = token_type_ids[index] for i in range(model_labels.shape[0]): segment_id = token_type_ids_example[:, 0].tolist()[i] col_id = token_type_ids_example[:, 1].tolist()[i] - 1 row_id = token_type_ids_example[:, 2].tolist()[i] - 1 if row_id >= 0 and col_id >= 0 and segment_id == 1: model_labels[i] = int(prev_answers[(col_id, row_id)]) token_type_ids_example[:, 3] = model_labels input_ids_example = input_ids[index] attention_mask_example = attention_mask[index] token_type_ids_example = token_type_ids[index] outputs = self.model( input_ids=np.expand_dims(input_ids_example, axis=0), attention_mask=np.expand_dims(attention_mask_example, axis=0), token_type_ids=np.expand_dims(token_type_ids_example, axis=0), ) logits = outputs.logits if self.aggregate: all_aggregations.append(outputs.logits_aggregation) all_logits.append(logits) dist_per_token = tfp.distributions.Bernoulli(logits=logits) probabilities = dist_per_token.probs_parameter() * tf.cast(attention_mask_example, tf.float32) coords_to_probs = collections.defaultdict(list) token_type_ids_example = token_type_ids_example for i, p in enumerate(tf.squeeze(probabilities).numpy().tolist()): segment_id = token_type_ids_example[:, 0].tolist()[i] col = token_type_ids_example[:, 1].tolist()[i] - 1 row = token_type_ids_example[:, 2].tolist()[i] - 1 if col >= 0 and row >= 0 and segment_id == 1: coords_to_probs[(col, row)].append(p) prev_answers = {key: np.array(coords_to_probs[key]).mean() > 0.5 for key in coords_to_probs} logits_batch = tf.concat(tuple(all_logits), 0) return (logits_batch,) if not self.aggregate else (logits_batch, tf.concat(tuple(all_aggregations), 0)) def __call__(self, *args, **kwargs): r pipeline_inputs = self._args_parser(*args, **kwargs) results = super().__call__(pipeline_inputs, **kwargs) if len(results) == 1: return results[0] return results def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, **kwargs): preprocess_params = {} if padding is not None: preprocess_params["padding"] = padding if truncation is not None: preprocess_params["truncation"] = truncation forward_params = {} if sequential is not None: forward_params["sequential"] = sequential return preprocess_params, forward_params, {} def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None): if truncation is None: if self.type == "tapas": truncation = "drop_rows_to_fit" else: truncation = "do_not_truncate" table, query = pipeline_input["table"], pipeline_input["query"] if table.empty: raise ValueError("table is empty") if query is None or query == "": raise ValueError("query is empty") inputs = self.tokenizer(table, query, return_tensors=self.framework, truncation=truncation, padding=padding) inputs["table"] = table return inputs def _forward(self, model_inputs, sequential=False): table = model_inputs.pop("table") if self.type == "tapas": if sequential: outputs = self.sequential_inference(**model_inputs) else: outputs = self.batch_inference(**model_inputs) else: outputs = self.model.generate(**model_inputs) model_outputs = {"model_inputs": model_inputs, "table": table, "outputs": outputs} 
return model_outputs def postprocess(self, model_outputs): inputs = model_outputs["model_inputs"] table = model_outputs["table"] outputs = model_outputs["outputs"] if self.type == "tapas": if self.aggregate: logits, logits_agg = outputs[:2] predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits, logits_agg) answer_coordinates_batch, agg_predictions = predictions aggregators = {i: self.model.config.aggregation_labels[pred] for i, pred in enumerate(agg_predictions)} no_agg_label_index = self.model.config.no_aggregation_label_index aggregators_prefix = { i: aggregators[i] + " > " for i, pred in enumerate(agg_predictions) if pred != no_agg_label_index } else: logits = outputs[0] predictions = self.tokenizer.convert_logits_to_predictions(inputs, logits) answer_coordinates_batch = predictions[0] aggregators = {} aggregators_prefix = {} answers = [] for index, coordinates in enumerate(answer_coordinates_batch): cells = [table.iat[coordinate] for coordinate in coordinates] aggregator = aggregators.get(index, "") aggregator_prefix = aggregators_prefix.get(index, "") answer = { "answer": aggregator_prefix + ", ".join(cells), "coordinates": coordinates, "cells": [table.iat[coordinate] for coordinate in coordinates], } if aggregator: answer["aggregator"] = aggregator answers.append(answer) if len(answer) == 0: raise PipelineException("Empty answer") else: answers = [{"answer": answer} for answer in self.tokenizer.batch_decode(outputs, skip_special_tokens=True)] return answers if len(answers) > 1 else answers[0]
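A sketch of batched queries against one table, and of the sequential mode described above for conversational (SQA-style) models; only the WTQ checkpoint appears in the docstring, so the SQA checkpoint name below is an assumption, not taken from this file:

import pandas as pd
from transformers import pipeline

table = pd.DataFrame.from_dict(
    {"Repository": ["Transformers", "Datasets"], "Stars": ["36542", "4512"]}
)

# Several queries against the same table are answered as a batch by default.
wtq = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")
print(wtq(table=table, query=["How many stars does Transformers have?", "Which repository has the most stars?"]))

# Conversational models need sequential=True so each answer can condition on the previous one.
# The checkpoint name below is an assumption (not taken from this file).
sqa = pipeline("table-question-answering", model="google/tapas-base-finetuned-sqa")
print(sqa(table=table, query=["How many repositories are listed?", "Of those, which has more stars?"], sequential=True))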
pipeline for text to text generation using seq2seq models example python from transformers import pipeline generator pipelinemodelmrm8488t5basefinetunedquestiongenerationap generator answer manuel context manuel has created rupertabase with the support of hftransformers and google generatedtext question who created the rupertabase learn more about the basics of using a pipeline in the pipeline tutorial pipelinetutorial you can pass text generation parameters to this pipeline to control stopping criteria decoding strategy and more learn more about text generation parameters in text generation strategies generationstrategies and text generationtextgeneration this text2textgenerationpipeline pipeline can currently be loaded from pipeline using the following task identifier text2textgeneration the models that this pipeline can use are models that have been finetuned on a translation task see the uptodate list of available models on huggingface comodelshttps huggingface comodels filtertext2textgeneration for a list of available parameters see the following documentationhttps huggingface codocstransformersenmainclassestextgenerationtransformers generation generationmixin generate usage python text2textgenerator pipelinetext2textgeneration text2textgeneratorquestion what is 42 context 42 is the answer to life the universe and everything used in the return key of the pipeline returnname generated def initself args kwargs super initargs kwargs self checkmodeltype tfmodelforseqtoseqcausallmmappingnames if self framework tf else modelforseqtoseqcausallmmappingnames def sanitizeparameters self returntensorsnone returntextnone returntypenone cleanuptokenizationspacesnone truncationnone stopsequencenone generatekwargs preprocessparams if truncation is not none preprocessparamstruncation truncation forwardparams generatekwargs postprocessparams if returntensors is not none and returntype is none returntype returntype tensors if returntensors else returntype text if returntype is not none postprocessparamsreturntype returntype if cleanuptokenizationspaces is not none postprocessparamscleanuptokenizationspaces cleanuptokenizationspaces if stopsequence is not none stopsequenceids self tokenizer encodestopsequence addspecialtokensfalse if lenstopsequenceids 1 warnings warn stopping on a multiple token sequence is not yet supported on transformers the first token of the stop sequence will be used as the stop sequence string in the interim generatekwargseostokenid stopsequenceids0 return preprocessparams forwardparams postprocessparams def checkinputsself inputlength int minlength int maxlength int return true def parseandtokenizeself args truncation prefix self model config prefix if self model config prefix is not none else if isinstanceargs0 list if self tokenizer padtokenid is none raise valueerrorplease make sure that the tokenizer has a padtokenid when using a batch input args prefix arg for arg in args0 padding true elif isinstanceargs0 str args prefix args0 padding false else raise valueerror f args0 args0 have the wrong format the should be either of type str or type list inputs self tokenizerargs paddingpadding truncationtruncation returntensorsself framework this is produced by tokenizers but is an invalid generate kwargs if tokentypeids in inputs del inputstokentypeids return inputs def callself args kwargs r generate the output texts using texts given as inputs args args str or liststr input text for the encoder returntensors bool optional defaults to false whether or not to include the tensors of 
predictions as token indices in the outputs returntext bool optional defaults to true whether or not to include the decoded texts in the outputs cleanuptokenizationspaces bool optional defaults to false whether or not to clean up the potential extra spaces in the text output truncation truncationstrategy optional defaults to truncationstrategy donottruncate the truncation strategy for the tokenization within the pipeline truncationstrategy donottruncate default will never truncate but it is sometimes desirable to truncate the input to fit the model s maxlength instead of throwing an error down the line generatekwargs additional keyword arguments to pass along to the generate method of the model see the generate method corresponding to your framework here modelgenerativemodels return a list or a list of list of dict each result comes as a dictionary with the following keys generatedtext str present when returntexttrue the generated text generatedtokenids torch tensor or tf tensor present when returntensorstrue the token ids of the generated text summarize news articles and other documents this summarizing pipeline can currently be loaded from pipeline using the following task identifier summarization the models that this pipeline can use are models that have been finetuned on a summarization task which is currently bartlargecnn t5small t5base t5large t53b t511b see the uptodate list of available models on huggingface comodelshttps huggingface comodels filtersummarization for a list of available parameters see the following documentationhttps huggingface codocstransformersenmainclassestextgenerationtransformers generation generationmixin generate usage python use bart in pytorch summarizer pipelinesummarization summarizeran apple a day keeps the doctor away minlength5 maxlength20 use t5 in tf summarizer pipelinesummarization modelt5base tokenizert5base frameworktf summarizeran apple a day keeps the doctor away minlength5 maxlength20 used in the return key of the pipeline returnname summary def callself args kwargs r summarize the texts given as inputs args documents str or liststr one or several articles or one list of articles to summarize returntext bool optional defaults to true whether or not to include the decoded texts in the outputs returntensors bool optional defaults to false whether or not to include the tensors of predictions as token indices in the outputs cleanuptokenizationspaces bool optional defaults to false whether or not to clean up the potential extra spaces in the text output generatekwargs additional keyword arguments to pass along to the generate method of the model see the generate method corresponding to your framework here modelgenerativemodels return a list or a list of list of dict each result comes as a dictionary with the following keys summarytext str present when returntexttrue the summary of the corresponding input summarytokenids torch tensor or tf tensor present when returntensorstrue the token ids of the summary checks whether there might be something wrong with given input with regard to the model translates from one language to another this translation pipeline can currently be loaded from pipeline using the following task identifier translationxxtoyy the models that this pipeline can use are models that have been finetuned on a translation task see the uptodate list of available models on huggingface comodelshttps huggingface comodels filtertranslation for a list of available parameters see the following documentationhttps huggingface 
codocstransformersenmainclassestextgenerationtransformers generation generationmixin generate usage python enfrtranslator pipelinetranslationentofr enfrtranslatorhow old are you used in the return key of the pipeline returnname translation def checkinputsself inputlength int minlength int maxlength int if inputlength 0 9 maxlength logger warning fyour inputlength inputlength is bigger than 0 9 maxlength maxlength you might consider increasing your maxlength manually e g translator maxlength400 return true def preprocessself args truncationtruncationstrategy donottruncate srclangnone tgtlangnone if getattrself tokenizer buildtranslationinputs none return self tokenizer buildtranslationinputs args returntensorsself framework truncationtruncation srclangsrclang tgtlangtgtlang else return super parseandtokenizeargs truncationtruncation def sanitizeparametersself srclangnone tgtlangnone kwargs preprocessparams forwardparams postprocessparams super sanitizeparameterskwargs if srclang is not none preprocessparamssrclang srclang if tgtlang is not none preprocessparamstgtlang tgtlang if srclang is none and tgtlang is none backward compatibility direct arguments use is preferred task kwargs gettask self task items task split if task and lenitems 4 translation xx to yy preprocessparamssrclang items1 preprocessparamstgtlang items3 return preprocessparams forwardparams postprocessparams def callself args kwargs r translate the texts given as inputs args args str or liststr texts to be translated returntensors bool optional defaults to false whether or not to include the tensors of predictions as token indices in the outputs returntext bool optional defaults to true whether or not to include the decoded texts in the outputs cleanuptokenizationspaces bool optional defaults to false whether or not to clean up the potential extra spaces in the text output srclang str optional the language of the input might be required for multilingual models will not have any effect for single pair translation models tgtlang str optional the language of the desired output might be required for multilingual models will not have any effect for single pair translation models generatekwargs additional keyword arguments to pass along to the generate method of the model see the generate method corresponding to your framework here modelgenerativemodels return a list or a list of list of dict each result comes as a dictionary with the following keys translationtext str present when returntexttrue the translation translationtokenids torch tensor or tf tensor present when returntensorstrue the token ids of the translation pipeline for text to text generation using seq2seq models example python from transformers import pipeline generator pipeline model mrm8488 t5 base finetuned question generation ap generator answer manuel context manuel has created ruperta base with the support of hf transformers and google generated_text question who created the ruperta base learn more about the basics of using a pipeline in the pipeline tutorial pipeline_tutorial you can pass text generation parameters to this pipeline to control stopping criteria decoding strategy and more learn more about text generation parameters in text generation strategies generation_strategies and text generation text_generation this text2textgenerationpipeline pipeline can currently be loaded from pipeline using the following task identifier text2text generation the models that this pipeline can use are models that have been fine tuned on a translation task see the up 
to-date list of available models on https://huggingface.co/models?filter=text2text-generation. For a list of available parameters, see the generation documentation: https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate

Usage:

    text2text_generator = pipeline("text2text-generation")
    text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")

"generated" is used in the return key of the pipeline. check_inputs checks whether there might be something wrong with the given input with regard to the model. token_type_ids are produced by some tokenizers but are invalid generate kwargs, so they are removed before generation.

__call__: Generate the output text(s) using text(s) given as inputs.

Args:
    args (str or List[str]): Input text for the encoder.
    return_tensors (bool, optional, defaults to False): Whether or not to include the tensors of predictions (as token indices) in the outputs.
    return_text (bool, optional, defaults to True): Whether or not to include the decoded texts in the outputs.
    clean_up_tokenization_spaces (bool, optional, defaults to False): Whether or not to clean up the potential extra spaces in the text output.
    truncation (TruncationStrategy, optional, defaults to TruncationStrategy.DO_NOT_TRUNCATE): The truncation strategy for the tokenization within the pipeline. DO_NOT_TRUNCATE (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's max_length instead of throwing an error down the line.
    generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework).

Return:
    A list or a list of list of dict: Each result comes as a dictionary with the following keys:
    - generated_text (str, present when return_text=True): The generated text.
    - generated_token_ids (torch.Tensor or tf.Tensor, present when return_tensors=True): The token ids of the generated text.

SummarizationPipeline: Summarize news articles and other documents. This summarizing pipeline can currently be loaded from pipeline() using the following task identifier: "summarization". The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently "bart-large-cnn", "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b". See the up-to-date list of available models on https://huggingface.co/models?filter=summarization. For a list of available parameters, see the generation documentation linked above.

Usage:

    # use bart in pytorch
    summarizer = pipeline("summarization")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)

    # use t5 in tf
    summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base", framework="tf")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)

"summary" is used in the return key of the pipeline.

__call__: Summarize the text(s) given as inputs.

Args:
    documents (str or List[str]): One or several articles (or one list of articles) to summarize.
    return_text (bool, optional, defaults to True): Whether or not to include the decoded texts in the outputs.
    return_tensors (bool, optional, defaults to False): Whether or not to include the tensors of predictions (as token indices) in the outputs.
    clean_up_tokenization_spaces (bool, optional, defaults to False): Whether or not to clean up the potential extra spaces in the text output.
    generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework).

Return:
    A list or a list of list of dict: Each result comes as a dictionary with the following keys:
    - summary_text (str, present when return_text=True): The summary of the corresponding input.
    - summary_token_ids (torch.Tensor or tf.Tensor, present when return_tensors=True): The token ids of the summary.

check_inputs checks whether there might be something wrong with the given input with regard to the model.

TranslationPipeline: Translates from one language to another. This translation pipeline can currently be loaded from pipeline() using the following task identifier: "translation_xx_to_yy". The models that this pipeline can use are models that have been fine-tuned on a translation task. See the up-to-date list of available models on https://huggingface.co/models?filter=translation. For a list of available parameters, see the generation documentation linked above.

Usage:

    en_fr_translator = pipeline("translation_en_to_fr")
    en_fr_translator("How old are you?")

"translation" is used in the return key of the pipeline. For backward compatibility, the source and target languages can be parsed from a task name of the form "translation_XX_to_YY", although passing src_lang and tgt_lang directly is preferred.

__call__: Translate the text(s) given as inputs.

Args:
    args (str or List[str]): Texts to be translated.
    return_tensors (bool, optional, defaults to False): Whether or not to include the tensors of predictions (as token indices) in the outputs.
    return_text (bool, optional, defaults to True): Whether or not to include the decoded texts in the outputs.
    clean_up_tokenization_spaces (bool, optional, defaults to False): Whether or not to clean up the potential extra spaces in the text output.
    src_lang (str, optional): The language of the input. Might be required for multilingual models. Will not have any effect for single-pair translation models.
    tgt_lang (str, optional): The language of the desired output. Might be required for multilingual models. Will not have any effect for single-pair translation models.
    generate_kwargs: Additional keyword arguments to pass along to the generate method of the model (see the generate method corresponding to your framework).

Return:
    A list or a list of list of dict: Each result comes as a dictionary with the following keys:
    - translation_text (str, present when return_text=True): The translation.
    - translation_token_ids (torch.Tensor or tf.Tensor, present when return_tensors=True): The token ids of the translation.
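As a quick recap of how the arguments above combine, here is a minimal sketch; downloading the default checkpoints for the "summarization" and "translation_en_to_fr" tasks is an assumption of the example, and any seq2seq checkpoints would work the same way.

from transformers import pipeline

# Summarization: min_length/max_length are forwarded to generate().
summarizer = pipeline("summarization")  # resolves to a default summarization checkpoint
summary = summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
print(summary[0]["summary_text"])

# Asking for tensors instead of text returns the generated token ids under summary_token_ids.
ids = summarizer("An apple a day, keeps the doctor away", return_tensors=True)
print(ids[0]["summary_token_ids"])

# Translation: the task name encodes the language pair; src_lang/tgt_lang only matter
# for multilingual checkpoints.
translator = pipeline("translation_en_to_fr")
print(translator("How old are you?")[0]["translation_text"])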
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES logger = logging.get_logger(__name__) class ReturnType(enum.Enum): TENSORS = 0 TEXT = 1 @add_end_docstrings(PIPELINE_INIT_ARGS) class Text2TextGenerationPipeline(Pipeline): return_name = "generated" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) def _sanitize_parameters( self, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs, ): preprocess_params = {} if truncation is not None: preprocess_params["truncation"] = truncation forward_params = generate_kwargs postprocess_params = {} if return_tensors is not None and return_type is None: return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: postprocess_params["return_type"] = return_type if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces if stop_sequence is not None: stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) if len(stop_sequence_ids) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) generate_kwargs["eos_token_id"] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def check_inputs(self, input_length: int, min_length: int, max_length: int): return True def _parse_and_tokenize(self, *args, truncation): prefix = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0], list): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input") args = ([prefix + arg for arg in args[0]],) padding = True elif isinstance(args[0], str): args = (prefix + args[0],) padding = False else: raise ValueError( f" `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`" ) inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework) if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__(self, *args, **kwargs): r result = super().__call__(*args, **kwargs) if ( isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]) and all(len(res) == 1 for res in result) ): return [res[0] for res in result] return result def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs): inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs) return inputs def _forward(self, model_inputs, **generate_kwargs): if self.framework == "pt": in_b, input_length = model_inputs["input_ids"].shape elif self.framework == "tf": in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy() self.check_inputs( input_length, generate_kwargs.get("min_length", self.model.config.min_length), generate_kwargs.get("max_length", self.model.config.max_length), ) output_ids = self.model.generate(**model_inputs, **generate_kwargs) out_b = output_ids.shape[0] if self.framework == "pt": output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:]) elif self.framework == "tf": output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:])) return {"output_ids": output_ids} def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False): records = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: record = {f"{self.return_name}_token_ids": output_ids} elif return_type == ReturnType.TEXT: record = { f"{self.return_name}_text": self.tokenizer.decode( output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) } records.append(record) return records @add_end_docstrings(PIPELINE_INIT_ARGS) class SummarizationPipeline(Text2TextGenerationPipeline): return_name = "summary" def __call__(self, *args, **kwargs): r return super().__call__(*args, **kwargs) def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool: if max_length < min_length: logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.") if input_length < max_length: logger.warning( f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is " "a summarization task, where outputs shorter than the input are typically wanted, you might " f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})" ) @add_end_docstrings(PIPELINE_INIT_ARGS) class TranslationPipeline(Text2TextGenerationPipeline): return_name = "translation" def check_inputs(self, input_length: int, min_length: int, max_length: int): if input_length > 0.9 * max_length: logger.warning( f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider " "increasing your max_length manually, e.g. 
translator('...', max_length=400)" ) return True def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None): if getattr(self.tokenizer, "_build_translation_inputs", None): return self.tokenizer._build_translation_inputs( *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang ) else: return super()._parse_and_tokenize(*args, truncation=truncation) def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs): preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs) if src_lang is not None: preprocess_params["src_lang"] = src_lang if tgt_lang is not None: preprocess_params["tgt_lang"] = tgt_lang if src_lang is None and tgt_lang is None: task = kwargs.get("task", self.task) items = task.split("_") if task and len(items) == 4: preprocess_params["src_lang"] = items[1] preprocess_params["tgt_lang"] = items[3] return preprocess_params, forward_params, postprocess_params def __call__(self, *args, **kwargs): r return super().__call__(*args, **kwargs)
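The stop_sequence handling in _sanitize_parameters only honours a single token: the sequence is encoded without special tokens and, if it spans several tokens, a warning is emitted and only the first token is used as eos_token_id. A small sketch of how that plays out; the google/flan-t5-small checkpoint is an assumption of the example, any seq2seq model works.

from transformers import pipeline

generator = pipeline("text2text-generation", model="google/flan-t5-small")

out = generator(
    "question: What is 42? context: 42 is the answer to life, the universe and everything",
    stop_sequence=".",   # encoded with add_special_tokens=False; if it maps to several tokens,
                         # only the first one is used and a warning is raised
    max_new_tokens=32,   # any remaining kwargs are forwarded to generate()
)
print(out[0]["generated_text"])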
TextClassificationPipeline init-time arguments:
    return_all_scores (bool, optional, defaults to False): Whether to return all prediction scores or just the one of the predicted class. Deprecated in favour of top_k.
    function_to_apply (str, optional, defaults to "default"): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
        - "default": if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output.
        - "sigmoid": applies the sigmoid function on the output.
        - "softmax": applies the softmax function on the output.
        - "none": does not apply any function on the output.

Text classification pipeline using any ModelForSequenceClassification. See the sequence classification examples (task_summary#sequence-classification) for more information.

Example:

    from transformers import pipeline

    classifier = pipeline(model="distilbert-base-uncased-finetuned-sst-2-english")
    classifier("This movie is disgustingly good !")
    # [{'label': 'POSITIVE', 'score': 1.0}]
    classifier("Director tried too much.")
    # [{'label': 'NEGATIVE', 'score': 0.996}]

Learn more about the basics of using a pipeline in the pipeline tutorial (pipeline_tutorial).

This text classification pipeline can currently be loaded from pipeline() using the following task identifier: "sentiment-analysis" (for classifying sequences according to positive or negative sentiments). If multiple classification labels are available (model.config.num_labels >= 2), the pipeline will run a softmax over the results. If there is a single label, the pipeline will run a sigmoid over the result. The models that this pipeline can use are models that have been fine-tuned on a sequence classification task. See the up-to-date list of available models on https://huggingface.co/models?filter=text-classification.

Note: an empty string is used as the default value of top_k because user code uses top_k=None to declare "no top_k".

__call__: Classify the text(s) given as inputs.

Args:
    args (str or List[str] or Dict[str] or List[Dict[str]]): One or several texts to classify. In order to use text pairs for your classification, you can send a dictionary containing {"text", "text_pair"} keys, or a list of those.
    top_k (int, optional, defaults to 1): How many results to return.
    function_to_apply (str, optional, defaults to "default"): The function to apply to the model outputs in order to retrieve the scores. If this argument is not specified, it will apply the following functions according to the number of labels: if the model has a single label, the sigmoid function is applied on the output; if the model has several labels, the softmax function is applied on the output. Possible values are "sigmoid", "softmax" and "none" (does not apply any function on the output).

Return:
    A list or a list of list of dict: Each result comes as a list of dictionaries with the following keys:
    - label (str): The label predicted.
    - score (float): The corresponding probability.
    If top_k is used, one such dictionary is returned per label.

Implementation notes carried over from the inline comments: the __call__ override keeps backward compatibility by wrapping the result in a list when a single string is run without top_k (retrieved from _sanitize_parameters in a somewhat awkward way); a list of [text, text_pair] pairs is kept as a legacy input path; plain lists of pairs are otherwise rejected in favour of {"text": ..., "text_pair": ...} dictionaries; XxxForSequenceClassification models should not use use_cache=True even if it is supported; and _legacy in postprocess distinguishes the backward-compatible single-dict output from the more natural list output used with pipeline(..., top_k=1).
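A short sketch of the input formats described above; the SST-2 checkpoint is the one named in the example, while the pair-aware NLI checkpoint used for the dictionary input is an assumption of the sketch.

from transformers import pipeline

classifier = pipeline(model="distilbert-base-uncased-finetuned-sst-2-english")

# Single string, default top_k=1: one dict per input.
print(classifier("This movie is disgustingly good !"))

# top_k=None returns every label, sorted by score.
print(classifier("Director tried too much.", top_k=None))

# Text pairs are sent as a dict; a plain [text, text_pair] list is rejected.
nli = pipeline(model="roberta-large-mnli")  # assumption: any pair-aware classifier works
print(nli({"text": "A soccer game with multiple males playing.",
           "text_pair": "Some men are playing a sport."}))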
import inspect import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES def sigmoid(_outputs): return 1.0 / (1.0 + np.exp(-_outputs)) def softmax(_outputs): maxes = np.max(_outputs, axis=-1, keepdims=True) shifted_exp = np.exp(_outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class ClassificationFunction(ExplicitEnum): SIGMOID = "sigmoid" SOFTMAX = "softmax" NONE = "none" @add_end_docstrings( PIPELINE_INIT_ARGS, r, ) class TextClassificationPipeline(Pipeline): return_all_scores = False function_to_apply = ClassificationFunction.NONE def __init__(self, **kwargs): super().__init__(**kwargs) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs): preprocess_params = tokenizer_kwargs postprocess_params = {} if hasattr(self.model.config, "return_all_scores") and return_all_scores is None: return_all_scores = self.model.config.return_all_scores if isinstance(top_k, int) or top_k is None: postprocess_params["top_k"] = top_k postprocess_params["_legacy"] = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, ) if return_all_scores: postprocess_params["top_k"] = None else: postprocess_params["top_k"] = 1 if isinstance(function_to_apply, str): function_to_apply = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: postprocess_params["function_to_apply"] = function_to_apply return preprocess_params, {}, postprocess_params def __call__(self, *args, **kwargs): result = super().__call__(*args, **kwargs) _legacy = "top_k" not in kwargs if isinstance(args[0], str) and _legacy: return [result] else: return result def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]: return_tensors = self.framework if isinstance(inputs, dict): return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs) elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2: return self.tokenizer( text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs ) elif isinstance(inputs, list): raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' 
) return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs) def _forward(self, model_inputs): model_forward = self.model.forward if self.framework == "pt" else self.model.call if "use_cache" in inspect.signature(model_forward).parameters.keys(): model_inputs["use_cache"] = False return self.model(**model_inputs) def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True): if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: function_to_apply = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: function_to_apply = ClassificationFunction.SOFTMAX elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None: function_to_apply = self.model.config.function_to_apply else: function_to_apply = ClassificationFunction.NONE outputs = model_outputs["logits"][0] outputs = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: scores = sigmoid(outputs) elif function_to_apply == ClassificationFunction.SOFTMAX: scores = softmax(outputs) elif function_to_apply == ClassificationFunction.NONE: scores = outputs else: raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}") if top_k == 1 and _legacy: return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()} dict_scores = [ {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores) ] if not _legacy: dict_scores.sort(key=lambda x: x["score"], reverse=True) if top_k is not None: dict_scores = dict_scores[:top_k] return dict_scores
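The postprocess step above boils down to picking sigmoid or softmax from the model config and then optionally truncating to top_k. A standalone numpy sketch of that decision; the logit values and labels are made up for illustration.

import numpy as np

def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))

def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

logits = np.array([1.2, -0.3, 0.8])            # made-up logits for a 3-label classifier
id2label = {0: "NEGATIVE", 1: "NEUTRAL", 2: "POSITIVE"}

# num_labels > 1 and no multi_label problem_type -> softmax, mirroring postprocess().
scores = softmax(logits)
ranked = sorted(
    ({"label": id2label[i], "score": float(s)} for i, s in enumerate(scores)),
    key=lambda x: x["score"],
    reverse=True,
)
print(ranked[:1])   # equivalent of top_k=1

# num_labels == 1 or problem_type == "multi_label_classification" -> sigmoid per logit.
print(sigmoid(np.array([0.4])))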
Copyright 2023 The HuggingFace Team. Licensed under the Apache License, Version 2.0.

Text-to-audio generation pipeline using any AutoModelForTextToWaveform or AutoModelForTextToSpectrogram. This pipeline generates an audio file from an input text and optional other conditional inputs.

Example:

    from transformers import pipeline

    pipe = pipeline(model="suno/bark-small")
    output = pipe("Hey it's HuggingFace on the phone!")
    audio = output["audio"]
    sampling_rate = output["sampling_rate"]

Learn more about the basics of using a pipeline in the pipeline tutorial (pipeline_tutorial).

You can specify parameters passed to the model by using TextToAudioPipeline.__call__'s forward_params or generate_kwargs.

Example:

    from transformers import pipeline

    music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt")

    # diversify the music generation by adding randomness with a high temperature,
    # and set a maximum music length
    generate_kwargs = {
        "do_sample": True,
        "temperature": 0.7,
        "max_new_tokens": 35,
    }

    outputs = music_generator("techno music with high melodic riffs", generate_kwargs=generate_kwargs)

This pipeline can currently be loaded from pipeline() using the following task identifiers: "text-to-speech" or "text-to-audio". See the list of available models on https://huggingface.co/models?filter=text-to-speech.

Implementation notes carried over from the inline comments: the sampling_rate is read from the model config and generation config; the Bark tokenizer is called with BarkProcessor-style kwargs, with priority given to user-supplied kwargs; some kwargs are additional tensors which need to be moved to the right device; generate_kwargs take priority over forward_params for generative models, while forward-only models raise an error if generate_kwargs is non-empty ("please use forward_params instead of generate_kwargs", listing the offending keys); and when the model outputs a spectrogram, it needs to be converted into a waveform by the vocoder.

__call__: Generates speech/audio from the inputs. See the TextToAudioPipeline documentation for more information.

Args:
    text_inputs (str or List[str]): The text(s) to generate.
    forward_params (dict, optional): Parameters passed to the model generation/forward method. forward_params are always passed to the underlying model.
    generate_kwargs (dict, optional): The dictionary of ad-hoc parametrization of generate_config to be used for the generation call. For a complete overview of generate, check the following guide: https://huggingface.co/docs/transformers/en/main_classes/text_generation. generate_kwargs are only passed to the underlying model if the latter is a generative model.

Return:
    A dict or a list of dict: The dictionaries have two keys:
    - audio (np.ndarray of shape (nb_channels, audio_length)): The generated audio waveform.
    - sampling_rate (int): The sampling rate of the generated audio waveform.
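A minimal sketch of consuming the returned dictionary, assuming scipy is available; the output filename and the use of suno/bark-small are choices of the example, not requirements of the pipeline.

import scipy.io.wavfile
from transformers import pipeline

pipe = pipeline("text-to-speech", model="suno/bark-small")
out = pipe("Hey it's HuggingFace on the phone!")

# out["audio"] is a numpy waveform; write it to disk at the reported sampling rate.
scipy.io.wavfile.write("bark_out.wav", rate=out["sampling_rate"], data=out["audio"].squeeze())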
from typing import List, Union from ..utils import is_torch_available from .base import Pipeline if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING from ..models.speecht5.modeling_speecht5 import SpeechT5HifiGan DEFAULT_VOCODER_ID = "microsoft/speecht5_hifigan" class TextToAudioPipeline(Pipeline): def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs): super().__init__(*args, **kwargs) if self.framework == "tf": raise ValueError("The TextToAudioPipeline is only available in PyTorch.") self.vocoder = None if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values(): self.vocoder = ( SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device) if vocoder is None else vocoder ) self.sampling_rate = sampling_rate if self.vocoder is not None: self.sampling_rate = self.vocoder.config.sampling_rate if self.sampling_rate is None: config = self.model.config gen_config = self.model.__dict__.get("generation_config", None) if gen_config is not None: config.update(gen_config.to_dict()) for sampling_rate_name in ["sample_rate", "sampling_rate"]: sampling_rate = getattr(config, sampling_rate_name, None) if sampling_rate is not None: self.sampling_rate = sampling_rate def preprocess(self, text, **kwargs): if isinstance(text, str): text = [text] if self.model.config.model_type == "bark": new_kwargs = { "max_length": self.model.generation_config.semantic_config.get("max_input_semantic_length", 256), "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } new_kwargs.update(kwargs) kwargs = new_kwargs output = self.tokenizer(text, **kwargs, return_tensors="pt") return output def _forward(self, model_inputs, **kwargs): kwargs = self._ensure_tensor_on_device(kwargs, device=self.device) forward_params = kwargs["forward_params"] generate_kwargs = kwargs["generate_kwargs"] if self.model.can_generate(): generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device) forward_params.update(generate_kwargs) output = self.model.generate(**model_inputs, **forward_params) else: if len(generate_kwargs): raise ValueError( f ) output = self.model(**model_inputs, **forward_params)[0] if self.vocoder is not None: output = self.vocoder(output) return output def __call__(self, text_inputs: Union[str, List[str]], **forward_params): return super().__call__(text_inputs, **forward_params) def _sanitize_parameters( self, preprocess_params=None, forward_params=None, generate_kwargs=None, ): params = { "forward_params": forward_params if forward_params else {}, "generate_kwargs": generate_kwargs if generate_kwargs else {}, } if preprocess_params is None: preprocess_params = {} postprocess_params = {} return preprocess_params, params, postprocess_params def postprocess(self, waveform): output_dict = {} output_dict["audio"] = waveform.cpu().float().numpy() output_dict["sampling_rate"] = self.sampling_rate return output_dict
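For spectrogram models such as SpeechT5, the constructor above loads the default microsoft/speecht5_hifigan vocoder and takes the sampling rate from it; extra conditioning tensors go through forward_params. A sketch under the assumption that the CMU Arctic x-vectors dataset is available (any 512-dim speaker embedding would do):

import torch
from datasets import load_dataset
from transformers import pipeline

tts = pipeline("text-to-speech", model="microsoft/speecht5_tts")

# Assumption of the sketch: pick one x-vector from the CMU Arctic dataset as speaker embedding.
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)

out = tts(
    "Hello, my dog is cooler than you!",
    forward_params={"speaker_embeddings": speaker_embedding},  # moved to the model device by _forward
)
print(out["audio"].shape, out["sampling_rate"])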
TokenClassificationArgumentHandler: handles arguments for token classification.

AggregationStrategy: all the valid aggregation strategies for TokenClassificationPipeline ("none", "simple", "first", "average", "max").

TokenClassificationPipeline init-time arguments:
    ignore_labels (List[str], defaults to ["O"]): A list of labels to ignore.
    grouped_entities (bool, optional, defaults to False): DEPRECATED, use aggregation_strategy instead. Whether or not to group the tokens corresponding to the same entity together in the predictions or not.
    stride (int, optional): If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size model_max_length. Works only with fast tokenizers and aggregation_strategy different from NONE. The value of this argument defines the number of overlapping tokens between chunks. In other words, the model will shift forward by tokenizer.model_max_length - stride tokens each step.
    aggregation_strategy (str, optional, defaults to "none"): The strategy to fuse (or not) tokens based on the model prediction.
        - "none": Will simply not do any aggregation and simply return raw results from the model.
        - "simple": Will attempt to group entities following the default schema: (A, B-TAG), (B, I-TAG), (C, I-TAG), (D, B-TAG2), (E, B-TAG2) will end up being [{"word": "ABC", "entity": "TAG"}, {"word": "D", "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}]. Notice that two consecutive B tags will end up as different entities. On word-based languages, we might end up splitting words undesirably: imagine Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity": "NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages that support that meaning, which is basically tokens separated by a space). These mitigations will only work on real words; "New york" might still be tagged with two different entities.
        - "first": (works only on word-based models) Will use the SIMPLE strategy except that words cannot end up with different tags. Words will simply use the tag of the first token of the word when there is ambiguity.
        - "average": (works only on word-based models) Will use the SIMPLE strategy except that words cannot end up with different tags. Scores will be averaged first across tokens, and then the maximum label is applied.
        - "max": (works only on word-based models) Will use the SIMPLE strategy except that words cannot end up with different tags. The word entity will simply be the token with the maximum score.

Named entity recognition pipeline using any ModelForTokenClassification. See the named entity recognition examples (task_summary#named-entity-recognition) for more information.

Example:

    from transformers import pipeline

    token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
    sentence = "Je m'appelle jean-baptiste et je vis à montréal"
    tokens = token_classifier(sentence)
    tokens
    # [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26},
    #  {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}]

    token = tokens[0]
    # Start and end provide an easy way to highlight words in the original text.
    sentence[token["start"] : token["end"]]
    # 'jean-baptiste'

    # Some models use the same idea to do part of speech.
    syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple")
    syntaxer("My name is Sarah and I live in London")
    # [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2},
    #  {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7},
    #  {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10},
    #  {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16},
    #  {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20},
    #  {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22},
    #  {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27},
    #  {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30},
    #  {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}]

Learn more about the basics of using a pipeline in the pipeline tutorial (pipeline_tutorial).

This token recognition pipeline can currently be loaded from pipeline() using the following task identifier: "ner" (for predicting the classes of tokens in a sequence: persons, organisations, locations or miscellaneous). The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on https://huggingface.co/models?filter=token-classification.

__call__: Classify each token of the text(s) given as inputs.

Args:
    inputs (str or List[str]): One or several texts (or one list of texts) for token classification.

Return:
    A list or a list of list of dict: Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys:
    - word (str): The token/word classified. This is obtained by decoding the selected tokens. If you want to have the exact string in the original sentence, use start and end.
    - score (float): The corresponding probability for entity.
    - entity (str): The entity predicted for that token/word (it is named entity_group when aggregation_strategy is not "none").
    - index (int, only present when aggregation_strategy="none"): The index of the corresponding token in the sentence.
    - start (int, optional): The index of the start of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer.
    - end (int, optional): The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer.

Implementation notes carried over from the inline comments: _forward filters anything that is in self.ignore_labels; gather_pre_entities fuses various numpy arrays into dicts with all the information needed for aggregation, skipping special tokens; a BPE, word-aware tokenizer exposes continuing_subword_prefix, so there is a correct way to fuse tokens, otherwise a fallback heuristic is used that will most likely fail on any kind of text/punctuation mixture (non word-aware models cannot do better than this, unfortunately); aggregate_word overrides tokens from a given word that disagree, to force agreement on word boundaries (example: "micro|soft| com|pany" tagged "B-ENT I-NAME I-ENT I-ENT" is rewritten with the FIRST strategy as "microsoft| company" tagged "B-ENT I-ENT"); group_sub_entities groups together adjacent tokens with the same entity predicted, taking the tag from the first entity in the group; get_tag treats a name that is not in B-/I- format as "I" for continuation; and group_entities appends an entity to the current disaggregated group when it is similar and adjacent to the previous one (the split accounts for the B- and I- prefixes, and two B-typed entities are never merged), otherwise the accumulated group is closed and a new one is started, with the last group flushed at the end. NerPipeline is an alias of TokenClassificationPipeline.
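A small sketch of how the stride and aggregation options interact; the dslim/bert-base-NER checkpoint and the sample sentence are assumptions of the example, any NER model with a fast tokenizer works.

from transformers import pipeline

ner = pipeline(
    "ner",
    model="dslim/bert-base-NER",     # assumption: any token-classification checkpoint
    aggregation_strategy="first",    # word-level tags; requires a fast tokenizer
    stride=64,                       # long texts are chunked with 64 overlapping tokens
)

text = "Hugging Face is based in New York City and Paris."
for entity in ner(text):
    # start/end index into the original string, so entities can be highlighted directly.
    print(entity["entity_group"], repr(text[entity["start"]:entity["end"]]), round(float(entity["score"]), 3))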
import types import warnings from typing import List, Optional, Tuple, Union import numpy as np from ..models.bert.tokenization_bert import BasicTokenizer from ..utils import ( ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available, ) from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline, Dataset if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES class TokenClassificationArgumentHandler(ArgumentHandler): def __call__(self, inputs: Union[str, List[str]], **kwargs): if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0: inputs = list(inputs) batch_size = len(inputs) elif isinstance(inputs, str): inputs = [inputs] batch_size = 1 elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType): return inputs, None else: raise ValueError("At least one input is required.") offset_mapping = kwargs.get("offset_mapping") if offset_mapping: if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple): offset_mapping = [offset_mapping] if len(offset_mapping) != batch_size: raise ValueError("offset_mapping should have the same batch size as the input") return inputs, offset_mapping class AggregationStrategy(ExplicitEnum): NONE = "none" SIMPLE = "simple" FIRST = "first" AVERAGE = "average" MAX = "max" @add_end_docstrings( PIPELINE_INIT_ARGS, r, ) class TokenClassificationPipeline(ChunkPipeline): default_input_names = "sequences" def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type( TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) self._basic_tokenizer = BasicTokenizer(do_lower_case=False) self._args_parser = args_parser def _sanitize_parameters( self, ignore_labels=None, grouped_entities: Optional[bool] = None, ignore_subwords: Optional[bool] = None, aggregation_strategy: Optional[AggregationStrategy] = None, offset_mapping: Optional[List[Tuple[int, int]]] = None, stride: Optional[int] = None, ): preprocess_params = {} if offset_mapping is not None: preprocess_params["offset_mapping"] = offset_mapping postprocess_params = {} if grouped_entities is not None or ignore_subwords is not None: if grouped_entities and ignore_subwords: aggregation_strategy = AggregationStrategy.FIRST elif grouped_entities and not ignore_subwords: aggregation_strategy = AggregationStrategy.SIMPLE else: aggregation_strategy = AggregationStrategy.NONE if grouped_entities is not None: warnings.warn( "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to" f' `aggregation_strategy="{aggregation_strategy}"` instead.' ) if ignore_subwords is not None: warnings.warn( "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to" f' `aggregation_strategy="{aggregation_strategy}"` instead.' ) if aggregation_strategy is not None: if isinstance(aggregation_strategy, str): aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()] if ( aggregation_strategy in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE} and not self.tokenizer.is_fast ): raise ValueError( "Slow tokenizers cannot handle subwords. 
Please set the `aggregation_strategy` option" ' to `"simple"` or use a fast tokenizer.' ) postprocess_params["aggregation_strategy"] = aggregation_strategy if ignore_labels is not None: postprocess_params["ignore_labels"] = ignore_labels if stride is not None: if stride >= self.tokenizer.model_max_length: raise ValueError( "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)" ) if aggregation_strategy == AggregationStrategy.NONE: raise ValueError( "`stride` was provided to process all the text but `aggregation_strategy=" f'"{aggregation_strategy}"`, please select another one instead.' ) else: if self.tokenizer.is_fast: tokenizer_params = { "return_overflowing_tokens": True, "padding": True, "stride": stride, } preprocess_params["tokenizer_params"] = tokenizer_params else: raise ValueError( "`stride` was provided to process all the text but you're using a slow tokenizer." " Please use a fast tokenizer." ) return preprocess_params, {}, postprocess_params def __call__(self, inputs: Union[str, List[str]], **kwargs): _inputs, offset_mapping = self._args_parser(inputs, **kwargs) if offset_mapping: kwargs["offset_mapping"] = offset_mapping return super().__call__(inputs, **kwargs) def preprocess(self, sentence, offset_mapping=None, **preprocess_params): tokenizer_params = preprocess_params.pop("tokenizer_params", {}) truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False inputs = self.tokenizer( sentence, return_tensors=self.framework, truncation=truncation, return_special_tokens_mask=True, return_offsets_mapping=self.tokenizer.is_fast, **tokenizer_params, ) inputs.pop("overflow_to_sample_mapping", None) num_chunks = len(inputs["input_ids"]) for i in range(num_chunks): if self.framework == "tf": model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()} else: model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()} if offset_mapping is not None: model_inputs["offset_mapping"] = offset_mapping model_inputs["sentence"] = sentence if i == 0 else None model_inputs["is_last"] = i == num_chunks - 1 yield model_inputs def _forward(self, model_inputs): special_tokens_mask = model_inputs.pop("special_tokens_mask") offset_mapping = model_inputs.pop("offset_mapping", None) sentence = model_inputs.pop("sentence") is_last = model_inputs.pop("is_last") if self.framework == "tf": logits = self.model(**model_inputs)[0] else: output = self.model(**model_inputs) logits = output["logits"] if isinstance(output, dict) else output[0] return { "logits": logits, "special_tokens_mask": special_tokens_mask, "offset_mapping": offset_mapping, "sentence": sentence, "is_last": is_last, **model_inputs, } def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None): if ignore_labels is None: ignore_labels = ["O"] all_entities = [] for model_outputs in all_outputs: logits = model_outputs["logits"][0].numpy() sentence = all_outputs[0]["sentence"] input_ids = model_outputs["input_ids"][0] offset_mapping = ( model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None ) special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy() maxes = np.max(logits, axis=-1, keepdims=True) shifted_exp = np.exp(logits - maxes) scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) if self.framework == "tf": input_ids = input_ids.numpy() offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None pre_entities = 
self.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy ) grouped_entities = self.aggregate(pre_entities, aggregation_strategy) entities = [ entity for entity in grouped_entities if entity.get("entity", None) not in ignore_labels and entity.get("entity_group", None) not in ignore_labels ] all_entities.extend(entities) num_chunks = len(all_outputs) if num_chunks > 1: all_entities = self.aggregate_overlapping_entities(all_entities) return all_entities def aggregate_overlapping_entities(self, entities): if len(entities) == 0: return entities entities = sorted(entities, key=lambda x: x["start"]) aggregated_entities = [] previous_entity = entities[0] for entity in entities: if previous_entity["start"] <= entity["start"] < previous_entity["end"]: current_length = entity["end"] - entity["start"] previous_length = previous_entity["end"] - previous_entity["start"] if current_length > previous_length: previous_entity = entity elif current_length == previous_length and entity["score"] > previous_entity["score"]: previous_entity = entity else: aggregated_entities.append(previous_entity) previous_entity = entity aggregated_entities.append(previous_entity) return aggregated_entities def gather_pre_entities( self, sentence: str, input_ids: np.ndarray, scores: np.ndarray, offset_mapping: Optional[List[Tuple[int, int]]], special_tokens_mask: np.ndarray, aggregation_strategy: AggregationStrategy, ) -> List[dict]: pre_entities = [] for idx, token_scores in enumerate(scores): if special_tokens_mask[idx]: continue word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])) if offset_mapping is not None: start_ind, end_ind = offset_mapping[idx] if not isinstance(start_ind, int): if self.framework == "pt": start_ind = start_ind.item() end_ind = end_ind.item() word_ref = sentence[start_ind:end_ind] if getattr(self.tokenizer, "_tokenizer", None) and getattr( self.tokenizer._tokenizer.model, "continuing_subword_prefix", None ): is_subword = len(word) != len(word_ref) else: if aggregation_strategy in { AggregationStrategy.FIRST, AggregationStrategy.AVERAGE, AggregationStrategy.MAX, }: warnings.warn( "Tokenizer does not support real words, using fallback heuristic", UserWarning, ) is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1] if int(input_ids[idx]) == self.tokenizer.unk_token_id: word = word_ref is_subword = False else: start_ind = None end_ind = None is_subword = False pre_entity = { "word": word, "scores": token_scores, "start": start_ind, "end": end_ind, "index": idx, "is_subword": is_subword, } pre_entities.append(pre_entity) return pre_entities def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}: entities = [] for pre_entity in pre_entities: entity_idx = pre_entity["scores"].argmax() score = pre_entity["scores"][entity_idx] entity = { "entity": self.model.config.id2label[entity_idx], "score": score, "index": pre_entity["index"], "word": pre_entity["word"], "start": pre_entity["start"], "end": pre_entity["end"], } entities.append(entity) else: entities = self.aggregate_words(pre_entities, aggregation_strategy) if aggregation_strategy == AggregationStrategy.NONE: return entities return self.group_entities(entities) def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict: word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity 
in entities]) if aggregation_strategy == AggregationStrategy.FIRST: scores = entities[0]["scores"] idx = scores.argmax() score = scores[idx] entity = self.model.config.id2label[idx] elif aggregation_strategy == AggregationStrategy.MAX: max_entity = max(entities, key=lambda entity: entity["scores"].max()) scores = max_entity["scores"] idx = scores.argmax() score = scores[idx] entity = self.model.config.id2label[idx] elif aggregation_strategy == AggregationStrategy.AVERAGE: scores = np.stack([entity["scores"] for entity in entities]) average_scores = np.nanmean(scores, axis=0) entity_idx = average_scores.argmax() entity = self.model.config.id2label[entity_idx] score = average_scores[entity_idx] else: raise ValueError("Invalid aggregation_strategy") new_entity = { "entity": entity, "score": score, "word": word, "start": entities[0]["start"], "end": entities[-1]["end"], } return new_entity def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]: if aggregation_strategy in { AggregationStrategy.NONE, AggregationStrategy.SIMPLE, }: raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation") word_entities = [] word_group = None for entity in entities: if word_group is None: word_group = [entity] elif entity["is_subword"]: word_group.append(entity) else: word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) word_group = [entity] if word_group is not None: word_entities.append(self.aggregate_word(word_group, aggregation_strategy)) return word_entities def group_sub_entities(self, entities: List[dict]) -> dict: entity = entities[0]["entity"].split("-", 1)[-1] scores = np.nanmean([entity["score"] for entity in entities]) tokens = [entity["word"] for entity in entities] entity_group = { "entity_group": entity, "score": np.mean(scores), "word": self.tokenizer.convert_tokens_to_string(tokens), "start": entities[0]["start"], "end": entities[-1]["end"], } return entity_group def get_tag(self, entity_name: str) -> Tuple[str, str]: if entity_name.startswith("B-"): bi = "B" tag = entity_name[2:] elif entity_name.startswith("I-"): bi = "I" tag = entity_name[2:] else: bi = "I" tag = entity_name return bi, tag def group_entities(self, entities: List[dict]) -> List[dict]: entity_groups = [] entity_group_disagg = [] for entity in entities: if not entity_group_disagg: entity_group_disagg.append(entity) continue bi, tag = self.get_tag(entity["entity"]) last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"]) if tag == last_tag and bi != "B": entity_group_disagg.append(entity) else: entity_groups.append(self.group_sub_entities(entity_group_disagg)) entity_group_disagg = [entity] if entity_group_disagg: entity_groups.append(self.group_sub_entities(entity_group_disagg)) return entity_groups NerPipeline = TokenClassificationPipeline
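For reference, here is a small usage sketch showing how the chunking (`stride`) and `aggregation_strategy` options handled above are typically combined at inference time. The checkpoint name `dslim/bert-base-NER` is only an illustrative choice and is not referenced anywhere in this module; any token-classification checkpoint with a fast tokenizer should work.

```python
from transformers import pipeline

# "dslim/bert-base-NER" is just an example checkpoint; the stride option requires a fast tokenizer.
ner = pipeline(
    "token-classification",
    model="dslim/bert-base-NER",
    aggregation_strategy="simple",  # merge sub-tokens into word-level entities
    stride=128,                     # overlap between chunks for texts longer than the model max length
)

text = "Hugging Face is based in New York City and Paris."
for entity in ner(text):
    # Each dict carries entity_group, score, word, and start/end character offsets.
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```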
Video classification pipeline using any AutoModelForVideoClassification. This pipeline predicts the class of a video.

This video classification pipeline can currently be loaded from pipeline() using the following task identifier: "video-classification". See the list of available models on https://huggingface.co/models?filter=video-classification.

Assign labels to the video(s) passed as inputs.

Args:
    videos (str or List[str]):
        The pipeline handles two types of videos: a string containing an http link pointing to a video, or a string containing a local path to a video. The pipeline accepts either a single video or a batch of videos, which must then be passed as strings. Videos in a batch must all be in the same format: all as http links or all as local paths.
    top_k (int, optional, defaults to 5):
        The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.
    num_frames (int, optional, defaults to self.model.config.num_frames):
        The number of frames sampled from the video to run the classification on. If not provided, will default to the number of frames specified in the model configuration.
    frame_sampling_rate (int, optional, defaults to 1):
        The sampling rate used to select frames from the video. If not provided, will default to 1, i.e. every frame will be used.

Return:
    A dictionary or a list of dictionaries containing the result. If the input is a single video, a dictionary is returned; if the input is a list of several videos, a list of dictionaries corresponding to the videos is returned. The dictionaries contain the following keys:
    - label (str): The label identified by the model.
    - score (float): The score attributed by the model to that label.
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class VideoClassificationPipeline(Pipeline): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "decord") self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES) def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None): preprocess_params = {} if frame_sampling_rate is not None: preprocess_params["frame_sampling_rate"] = frame_sampling_rate if num_frames is not None: preprocess_params["num_frames"] = num_frames postprocess_params = {} if top_k is not None: postprocess_params["top_k"] = top_k return preprocess_params, {}, postprocess_params def __call__(self, videos: Union[str, List[str]], **kwargs): return super().__call__(videos, **kwargs) def preprocess(self, video, num_frames=None, frame_sampling_rate=1): if num_frames is None: num_frames = self.model.config.num_frames if video.startswith("http://") or video.startswith("https://"): video = BytesIO(requests.get(video).content) videoreader = VideoReader(video) videoreader.seek(0) start_idx = 0 end_idx = num_frames * frame_sampling_rate - 1 indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64) video = videoreader.get_batch(indices).asnumpy() video = list(video) model_inputs = self.image_processor(video, return_tensors=self.framework) return model_inputs def _forward(self, model_inputs): model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": probs = model_outputs.logits.softmax(-1)[0] scores, ids = probs.topk(top_k) else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
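A short usage sketch for the pipeline above. The checkpoint `MCG-NJU/videomae-base-finetuned-kinetics` and the local file name `archery.mp4` are assumptions for illustration only; any video-classification checkpoint and any readable video path or URL should work, provided the `decord` backend is installed and the video has enough frames for the sampling settings.

```python
from transformers import pipeline

# Hypothetical example checkpoint; requires the `decord` package.
classifier = pipeline(
    "video-classification",
    model="MCG-NJU/videomae-base-finetuned-kinetics",
)

# Placeholder path; a local video file or an http(s) URL is accepted.
predictions = classifier(
    "archery.mp4",
    top_k=3,
    frame_sampling_rate=4,  # sample every 4th frame
)
print(predictions)  # [{"score": ..., "label": ...}, ...]
```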
Visual Question Answering pipeline using an AutoModelForVisualQuestionAnswering. This pipeline is currently only available in PyTorch.

Example:

    from transformers import pipeline

    oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
    image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"

    oracle(question="What is she wearing?", image=image_url)
    # [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]

    oracle(question="What is she wearing?", image=image_url, top_k=1)
    # [{'score': 0.948, 'answer': 'hat'}]

    oracle(question="Is this a person?", image=image_url, top_k=1)
    # [{'score': 0.993, 'answer': 'yes'}]

    oracle(question="Is this a man?", image=image_url, top_k=1)
    # [{'score': 0.996, 'answer': 'no'}]

Learn more about the basics of using a pipeline in the pipeline tutorial.

This visual question answering pipeline can currently be loaded from pipeline() using the following task identifiers: "visual-question-answering", "vqa". The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See the up-to-date list of available models on https://huggingface.co/models?filter=visual-question-answering.

Answers open-ended questions about images. The pipeline accepts several types of inputs, which are detailed below:

- pipeline(image=image, question=question)
- pipeline({"image": image, "question": question})
- pipeline([{"image": image, "question": question}])
- pipeline([{"image": image, "question": question}, {"image": image, "question": question}])

Args:
    image (str, List[str], PIL.Image or List[PIL.Image]):
        The pipeline handles three types of images: a string containing an http link pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly. The pipeline accepts either a single image or a batch of images. If given a single image, it can be broadcast to multiple questions.
    question (str or List[str]):
        The question(s) asked. If given a single question, it can be broadcast to multiple images.
    top_k (int, optional, defaults to 5):
        The number of top labels that will be returned by the pipeline. If the provided number is higher than the number of labels available in the model configuration, it will default to the number of labels.
    timeout (float, optional, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever.

Return:
    A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:
    - answer (str): The answer identified by the model.
    - score (float): The score attributed by the model to that answer.

The call also supports dict inputs of the form {"image": image, "question": question}, lists of such dicts, generators and datasets.
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class VisualQuestionAnsweringPipeline(Pipeline): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES) def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs): preprocess_params, postprocess_params = {}, {} if padding is not None: preprocess_params["padding"] = padding if truncation is not None: preprocess_params["truncation"] = truncation if timeout is not None: preprocess_params["timeout"] = timeout if top_k is not None: postprocess_params["top_k"] = top_k return preprocess_params, {}, postprocess_params def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs): r if isinstance(image, (Image.Image, str)) and isinstance(question, str): inputs = {"image": image, "question": question} else: inputs = image results = super().__call__(inputs, **kwargs) return results def preprocess(self, inputs, padding=False, truncation=False, timeout=None): image = load_image(inputs["image"], timeout=timeout) model_inputs = self.tokenizer( inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation ) image_features = self.image_processor(images=image, return_tensors=self.framework) model_inputs.update(image_features) return model_inputs def _forward(self, model_inputs): if self.model.can_generate(): model_outputs = self.model.generate(**model_inputs) else: model_outputs = self.model(**model_inputs) return model_outputs def postprocess(self, model_outputs, top_k=5): if self.model.can_generate(): return [ {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()} for output_ids in model_outputs ] else: if top_k > self.model.config.num_labels: top_k = self.model.config.num_labels if self.framework == "pt": probs = model_outputs.logits.sigmoid()[0] scores, ids = probs.topk(top_k) else: raise ValueError(f"Unsupported framework: {self.framework}") scores = scores.tolist() ids = ids.tolist() return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
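A brief usage sketch for the pipeline above, reusing the checkpoint and image URL already named in the docstring. The batched form mirrors the dict input format accepted by `__call__`.

```python
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")

image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"

# Single image / single question
print(vqa(image=image_url, question="What is she wearing?", top_k=1))

# Batched: a list of {"image": ..., "question": ...} dicts is also accepted
batch = [
    {"image": image_url, "question": "Is this a person?"},
    {"image": image_url, "question": "Is this a man?"},
]
print(vqa(batch, top_k=1))
```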
Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0.

Zero-shot audio classification pipeline using ClapModel. This pipeline predicts the class of an audio clip when you provide an audio input and a set of candidate_labels.

Example:

    from transformers import pipeline
    from datasets import load_dataset

    dataset = load_dataset("ashraq/esc50")
    audio = next(iter(dataset["train"]["audio"]))["array"]
    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
    # [{'score': 0.9996, 'label': 'Sound of a dog'}, {'score': 0.0004, 'label': 'Sound of vacuum cleaner'}]

Learn more about the basics of using a pipeline in the pipeline tutorial.

This audio classification pipeline can currently be loaded from pipeline() using the following task identifier: "zero-shot-audio-classification". See the list of available models on https://huggingface.co/models?filter=zero-shot-audio-classification. No specific MODEL_FOR_XXX mapping is available yet.

Assign labels to the audio(s) passed as inputs.

Args:
    audios (str, List[str], np.array or List[np.array]):
        The pipeline handles three types of inputs: a string containing an http link pointing to an audio file, a string containing a local path to an audio file, or an audio loaded in numpy.
    candidate_labels (List[str]):
        The candidate labels for this audio.
    hypothesis_template (str, optional, defaults to "This is a sound of {}."):
        The sentence used in conjunction with candidate_labels to attempt the audio classification by replacing the placeholder with each candidate label. The likelihood is then estimated using logits_per_audio.

Return:
    A list of dictionaries, one per proposed label. The dictionaries contain the following keys:
    - label (str): One of the suggested candidate_labels.
    - score (float): The score attributed by the model to that label (between 0 and 1).

Implementation notes from preprocess: when the input is a string, we need to actually check for a real protocol, otherwise it would be impossible to use a local file named like http_huggingface_co.png; a batching case is also handled.
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotAudioClassificationPipeline(Pipeline): def __init__(self, **kwargs): super().__init__(**kwargs) if self.framework != "pt": raise ValueError(f"The {self.__class__} is only available in PyTorch.") def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs): return super().__call__(audios, **kwargs) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."): if isinstance(audio, str): if audio.startswith("http://") or audio.startswith("https://"): audio = requests.get(audio).content else: with open(audio, "rb") as f: audio = f.read() if isinstance(audio, bytes): audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate) if not isinstance(audio, np.ndarray): raise ValueError("We expect a numpy ndarray as input") if len(audio.shape) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline") inputs = self.feature_extractor( [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt" ) inputs["candidate_labels"] = candidate_labels sequences = [hypothesis_template.format(x) for x in candidate_labels] text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) inputs["text_inputs"] = [text_inputs] return inputs def _forward(self, model_inputs): candidate_labels = model_inputs.pop("candidate_labels") text_inputs = model_inputs.pop("text_inputs") if isinstance(text_inputs[0], UserDict): text_inputs = text_inputs[0] else: text_inputs = text_inputs[0][0] outputs = self.model(**text_inputs, **model_inputs) model_outputs = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def postprocess(self, model_outputs): candidate_labels = model_outputs.pop("candidate_labels") logits = model_outputs["logits"][0] if self.framework == "pt": probs = logits.softmax(dim=0) scores = probs.tolist() else: raise ValueError("`tf` framework not supported.") result = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) ] return result
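A minimal usage sketch for the pipeline above, using the `laion/clap-htsat-unfused` checkpoint named in the docstring. The one-second array of zeros is only a stand-in waveform so the snippet stays self-contained; in practice you would pass real audio (a path, URL, or 1-D numpy array).

```python
import numpy as np
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-audio-classification",
    model="laion/clap-htsat-unfused",
)

# Stand-in waveform: one second of silence at CLAP's 48 kHz sampling rate.
audio = np.zeros(48_000, dtype=np.float32)

result = classifier(
    audio,
    candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
    hypothesis_template="This is a sound of {}.",
)
print(result)  # [{"score": ..., "label": ...}, ...] sorted by score
```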
Handles arguments for zero-shot text classification by turning each possible label into an NLI premise/hypothesis pair.

NLI-based zero-shot classification pipeline using a ModelForSequenceClassification trained on NLI (natural language inference) tasks. It is the equivalent of text-classification pipelines, but these models don't require a hardcoded number of potential classes; the classes can be chosen at runtime. This usually means it is slower, but it is much more flexible.

Any combination of sequences and labels can be passed, and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. The logit for entailment is then taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the entailment label must be included in the model config's label2id.

Example:

    from transformers import pipeline

    oracle = pipeline(model="facebook/bart-large-mnli")
    oracle(
        "I have a problem with my iphone that needs to be resolved asap!!",
        candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
    )
    # {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}

    oracle(
        "I have a problem with my iphone that needs to be resolved asap!!",
        candidate_labels=["english", "german"],
    )
    # {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['english', 'german'], 'scores': [0.814, 0.186]}

Learn more about the basics of using a pipeline in the pipeline tutorial.

This NLI pipeline can currently be loaded from pipeline() using the following task identifier: "zero-shot-classification". The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on https://huggingface.co/models?search=nli.

Implementation notes: inputs are parsed and tokenized with truncation "only_first" so that the hypothesis (label) is not truncated; padding is overridden for tokenizers that do not support it; tokenizers might complain that we want to truncate to a value that is not even reached by the input, in which case we do not truncate (there seems to be no better way to catch that exception).

Classify the sequence(s) given as inputs. See the ZeroShotClassificationPipeline documentation for more information.

Args:
    sequences (str or List[str]):
        The sequence(s) to classify. Will be truncated if the model input is too large.
    candidate_labels (str or List[str]):
        The set of possible class labels to classify each sequence into. Can be a single label, a string of comma-separated labels, or a list of labels.
    hypothesis_template (str, optional, defaults to "This example is {}."):
        The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. For example, with the default template "This example is {}." and the candidate label "sports", the model input looks like "<cls> sequence to classify <sep> This example is sports . <sep>". The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting.
    multi_label (bool, optional, defaults to False):
        Whether or not multiple candidate labels can be true. If False, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If True, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score.

Return:
    A dict or a list of dicts. Each result comes as a dictionary with the following keys:
    - sequence (str): The sequence for which this is the output.
    - labels (List[str]): The labels sorted by order of likelihood.
    - scores (List[float]): The probabilities for each of the labels.

Implementation notes: XxxForSequenceClassification models should not use use_cache=True even if it is supported; with multi_label, softmax is applied over the entailment vs. contradiction dimension for each label independently; otherwise the entailment logits are softmaxed over all candidate labels.
import inspect from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline logger = logging.get_logger(__name__) class ZeroShotClassificationArgumentHandler(ArgumentHandler): def _parse_labels(self, labels): if isinstance(labels, str): labels = [label.strip() for label in labels.split(",") if label.strip()] return labels def __call__(self, sequences, labels, hypothesis_template): if len(labels) == 0 or len(sequences) == 0: raise ValueError("You must include at least one label and at least one sequence.") if hypothesis_template.format(labels[0]) == hypothesis_template: raise ValueError( ( 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' "Make sure the passed template includes formatting syntax such as {{}} where the label should go." ).format(hypothesis_template) ) if isinstance(sequences, str): sequences = [sequences] sequence_pairs = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels]) return sequence_pairs, sequences @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotClassificationPipeline(ChunkPipeline): def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs): self._args_parser = args_parser super().__init__(*args, **kwargs) if self.entailment_id == -1: logger.warning( "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to " "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." ) @property def entailment_id(self): for label, ind in self.model.config.label2id.items(): if label.lower().startswith("entail"): return ind return -1 def _parse_and_tokenize( self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs ): return_tensors = self.framework if self.tokenizer.pad_token is None: logger.error( "Tokenizer was not supporting padding necessary for zero-shot, attempting to use " " `pad_token=eos_token`" ) self.tokenizer.pad_token = self.tokenizer.eos_token try: inputs = self.tokenizer( sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, ) except Exception as e: if "too short" in str(e): inputs = self.tokenizer( sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, ) else: raise e return inputs def _sanitize_parameters(self, **kwargs): if kwargs.get("multi_class", None) is not None: kwargs["multi_label"] = kwargs["multi_class"] logger.warning( "The `multi_class` argument has been deprecated and renamed to `multi_label`. " "`multi_class` will be removed in a future version of Transformers." 
) preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"]) if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] postprocess_params = {} if "multi_label" in kwargs: postprocess_params["multi_label"] = kwargs["multi_label"] return preprocess_params, {}, postprocess_params def __call__( self, sequences: Union[str, List[str]], *args, **kwargs, ): if len(args) == 0: pass elif len(args) == 1 and "candidate_labels" not in kwargs: kwargs["candidate_labels"] = args[0] else: raise ValueError(f"Unable to understand extra arguments {args}") return super().__call__(sequences, **kwargs) def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."): sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template) for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)): model_input = self._parse_and_tokenize([sequence_pair]) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(candidate_labels) - 1, **model_input, } def _forward(self, inputs): candidate_label = inputs["candidate_label"] sequence = inputs["sequence"] model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names} model_forward = self.model.forward if self.framework == "pt" else self.model.call if "use_cache" in inspect.signature(model_forward).parameters.keys(): model_inputs["use_cache"] = False outputs = self.model(**model_inputs) model_outputs = { "candidate_label": candidate_label, "sequence": sequence, "is_last": inputs["is_last"], **outputs, } return model_outputs def postprocess(self, model_outputs, multi_label=False): candidate_labels = [outputs["candidate_label"] for outputs in model_outputs] sequences = [outputs["sequence"] for outputs in model_outputs] logits = np.concatenate([output["logits"].numpy() for output in model_outputs]) N = logits.shape[0] n = len(candidate_labels) num_sequences = N // n reshaped_outputs = logits.reshape((num_sequences, n, -1)) if multi_label or len(candidate_labels) == 1: entailment_id = self.entailment_id contradiction_id = -1 if entailment_id == 0 else 0 entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]] scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True) scores = scores[..., 1] else: entail_logits = reshaped_outputs[..., self.entailment_id] scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True) top_inds = list(reversed(scores[0].argsort())) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
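A short usage sketch for the pipeline above, with the `facebook/bart-large-mnli` checkpoint named in the docstring, exercising the `multi_label` normalization discussed in `postprocess`.

```python
from transformers import pipeline

oracle = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")

sequence = "I have a problem with my iphone that needs to be resolved asap!!"
labels = ["urgent", "not urgent", "phone", "tablet", "computer"]

# multi_label=True scores each label independently (entailment vs. contradiction softmax)
result = oracle(
    sequence,
    candidate_labels=labels,
    hypothesis_template="This example is {}.",
    multi_label=True,
)
print(list(zip(result["labels"], [round(s, 3) for s in result["scores"]])))
```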
Zero-shot image classification pipeline using ClipModel. This pipeline predicts the class of an image when you provide an image and a set of candidate_labels.

Example:

    from transformers import pipeline

    classifier = pipeline(model="openai/clip-vit-large-patch14")
    classifier(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        candidate_labels=["animals", "humans", "landscape"],
    )
    # [{'score': 0.965, 'label': 'animals'}, {'score': 0.03, 'label': 'humans'}, {'score': 0.005, 'label': 'landscape'}]

    classifier(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        candidate_labels=["black and white", "photorealist", "painting"],
    )
    # [{'score': 0.996, 'label': 'black and white'}, {'score': 0.003, 'label': 'photorealist'}, {'score': 0.0, 'label': 'painting'}]

Learn more about the basics of using a pipeline in the pipeline tutorial.

This image classification pipeline can currently be loaded from pipeline() using the following task identifier: "zero-shot-image-classification". See the list of available models on https://huggingface.co/models?filter=zero-shot-image-classification.

Assign labels to the image(s) passed as inputs.

Args:
    images (str, List[str], PIL.Image or List[PIL.Image]):
        The pipeline handles three types of images: a string containing an http link pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly.
    candidate_labels (List[str]):
        The candidate labels for this image.
    hypothesis_template (str, optional, defaults to "This is a photo of {}."):
        The sentence used in conjunction with candidate_labels to attempt the image classification by replacing the placeholder with each candidate label. The likelihood is then estimated using logits_per_image.
    timeout (float, optional, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever.

Return:
    A list of dictionaries, one per proposed label. The dictionaries contain the following keys:
    - label (str): One of the suggested candidate_labels.
    - score (float): The score attributed by the model to that label (between 0 and 1).

A batching case is handled in postprocessing.
from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES from ..tf_utils import stable_softmax logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotImageClassificationPipeline(Pipeline): def __init__(self, **kwargs): super().__init__(**kwargs) requires_backends(self, "vision") self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES ) def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs): return super().__call__(images, **kwargs) def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "candidate_labels" in kwargs: preprocess_params["candidate_labels"] = kwargs["candidate_labels"] if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] if "hypothesis_template" in kwargs: preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}.", timeout=None): image = load_image(image, timeout=timeout) inputs = self.image_processor(images=[image], return_tensors=self.framework) inputs["candidate_labels"] = candidate_labels sequences = [hypothesis_template.format(x) for x in candidate_labels] text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True) inputs["text_inputs"] = [text_inputs] return inputs def _forward(self, model_inputs): candidate_labels = model_inputs.pop("candidate_labels") text_inputs = model_inputs.pop("text_inputs") if isinstance(text_inputs[0], UserDict): text_inputs = text_inputs[0] else: text_inputs = text_inputs[0][0] outputs = self.model(**text_inputs, **model_inputs) model_outputs = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def postprocess(self, model_outputs): candidate_labels = model_outputs.pop("candidate_labels") logits = model_outputs["logits"][0] if self.framework == "pt": probs = logits.softmax(dim=-1).squeeze(-1) scores = probs.tolist() if not isinstance(scores, list): scores = [scores] elif self.framework == "tf": probs = stable_softmax(logits, axis=-1) scores = probs.numpy().tolist() else: raise ValueError(f"Unsupported framework: {self.framework}") result = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0]) ] return result
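A minimal usage sketch for the pipeline above, reusing the `openai/clip-vit-large-patch14` checkpoint and the parrots image URL from the docstring.

```python
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-large-patch14",
)

url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
result = classifier(
    url,
    candidate_labels=["animals", "humans", "landscape"],
    hypothesis_template="This is a photo of {}.",
)
print(result)  # one dict per candidate label, sorted by score
```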
Zero-shot object detection pipeline using OwlViTForObjectDetection. This pipeline predicts bounding boxes of objects when you provide an image and a set of candidate_labels.

Example:

    from transformers import pipeline

    detector = pipeline(model="google/owlvit-base-patch32", task="zero-shot-object-detection")
    detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "couch"],
    )
    # [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
    #  {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
    #  {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]

    detector(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        candidate_labels=["head", "bird"],
    )
    # [{'score': 0.119, 'label': 'bird', 'box': {'xmin': 71, 'ymin': 170, 'xmax': 410, 'ymax': 508}}]

Learn more about the basics of using a pipeline in the pipeline tutorial.

This object detection pipeline can currently be loaded from pipeline() using the following task identifier: "zero-shot-object-detection". See the list of available models on https://huggingface.co/models?filter=zero-shot-object-detection.

Detect objects (bounding boxes and classes) in the image(s) passed as inputs.

Args:
    image (str, PIL.Image or List[Dict[str, Any]]):
        The pipeline handles three types of images: a string containing an http url pointing to an image, a string containing a local path to an image, or an image loaded in PIL directly. You can also pass a list, a dataset or a generator of dicts pairing an image with its candidate labels, like so:

            detector(
                [
                    {"image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "couch"]},
                    {"image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "couch"]},
                ]
            )
            # returns one list of predictions per input image, e.g.
            # [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
            #   {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
            #   {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}],
            #  [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
            #   {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
            #   {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]

    candidate_labels (str, List[str] or List[List[str]]):
        What the model should recognize in the image.
    threshold (float, optional, defaults to 0.1):
        The probability necessary to make a prediction.
    top_k (int, optional, defaults to None):
        The number of top predictions that will be returned by the pipeline. If the provided number is None or higher than the number of predictions available, it will default to the number of predictions.
    timeout (float, optional, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever.

Return:
    A list of lists containing prediction results, one list per input image. Each list contains dictionaries with the following keys:
    - label (str): Text query corresponding to the found object.
    - score (float): Score corresponding to the object (between 0 and 1).
    - box (Dict[str, int]): Bounding box of the detected object in the image's original size, a dictionary with xmin, ymin, xmax, ymax keys.

The _get_bounding_box helper turns a coordinate tensor into a dict such as {"xmin": xmin, ...}.

Args:
    box (torch.Tensor): Tensor containing the coordinates in corners format.

Returns:
    bbox (Dict[str, int]): Dict containing the coordinates in corners format.
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES logger = logging.get_logger(__name__) @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotObjectDetectionPipeline(ChunkPipeline): def __init__(self, **kwargs): super().__init__(**kwargs) if self.framework == "tf": raise ValueError(f"The {self.__class__} is only available in PyTorch.") requires_backends(self, "vision") self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES) def __call__( self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs, ): if "text_queries" in kwargs: candidate_labels = kwargs.pop("text_queries") if isinstance(image, (str, Image.Image)): inputs = {"image": image, "candidate_labels": candidate_labels} else: inputs = image results = super().__call__(inputs, **kwargs) return results def _sanitize_parameters(self, **kwargs): preprocess_params = {} if "timeout" in kwargs: preprocess_params["timeout"] = kwargs["timeout"] postprocess_params = {} if "threshold" in kwargs: postprocess_params["threshold"] = kwargs["threshold"] if "top_k" in kwargs: postprocess_params["top_k"] = kwargs["top_k"] return preprocess_params, {}, postprocess_params def preprocess(self, inputs, timeout=None): image = load_image(inputs["image"], timeout=timeout) candidate_labels = inputs["candidate_labels"] if isinstance(candidate_labels, str): candidate_labels = candidate_labels.split(",") target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32) for i, candidate_label in enumerate(candidate_labels): text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework) image_features = self.image_processor(image, return_tensors=self.framework) yield { "is_last": i == len(candidate_labels) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _forward(self, model_inputs): target_size = model_inputs.pop("target_size") candidate_label = model_inputs.pop("candidate_label") is_last = model_inputs.pop("is_last") outputs = self.model(**model_inputs) model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def postprocess(self, model_outputs, threshold=0.1, top_k=None): results = [] for model_output in model_outputs: label = model_output["candidate_label"] model_output = BaseModelOutput(model_output) outputs = self.image_processor.post_process_object_detection( outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): score = outputs["scores"][index].item() box = self._get_bounding_box(outputs["boxes"][index][0]) result = {"score": score, "label": label, "box": box} results.append(result) results = sorted(results, key=lambda x: x["score"], reverse=True) if top_k: results = results[:top_k] return results def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") xmin, ymin, xmax, ymax = box.int().tolist() bbox = { 
"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Processing (saving/loading) class for common processors.

ProcessorMixin is a mixin used to provide saving/loading functionality to all processor classes. The transformers module is dynamically imported so the attribute classes of the processor can be grabbed from their names; names need to be "{attr}_class" for each attr in `attributes`, and positional args have to match the `attributes` class attribute. At init time, args and kwargs are sanitized and each argument is checked against its expected class (this also catches a user initializing in the wrong order); nothing is ever going to be an instance of an "AutoXxx" class, so in that case the check falls back to the base class.

save_pretrained: saves the attributes of this processor (feature extractor, tokenizer, ...) in the specified directory so that it can be reloaded using the ProcessorMixin.from_pretrained method. This class method simply calls FeatureExtractionMixin.save_pretrained and PreTrainedTokenizerBase.save_pretrained; please refer to the docstrings of those methods for more information.
    save_directory (str or os.PathLike): directory where the feature extractor JSON file and the tokenizer files will be saved (the directory will be created if it does not exist).
    push_to_hub (bool, optional, defaults to False): whether or not to push your model to the Hugging Face model hub after saving it. You can specify the repository you want to push to with repo_id (will default to the name of save_directory in your namespace).
    kwargs (Dict[str, Any], optional): additional keyword arguments passed along to the PushToHubMixin.push_to_hub method.
If there is a custom config, the file defining it is copied into the folder and the attributes are set so it can be loaded from the Hub; the processor class is included in the attribute config so this processor can then be reloaded with the AutoProcessor API. The "auto_map" attribute added to the init_kwargs of the tokenizers needs to be cleaned up afterwards.

from_pretrained: instantiates a processor associated with a pretrained model. This class method simply calls the feature extractor FeatureExtractionMixin.from_pretrained, image processor ImageProcessingMixin.from_pretrained and the tokenizer PreTrainedTokenizer.from_pretrained methods; please refer to the docstrings of those methods for more information.
    pretrained_model_name_or_path (str or os.PathLike): this can be either:
        - a string, the model id of a pretrained feature extractor hosted inside a model repo on huggingface.co; valid model ids can be located at the root level, like "bert-base-uncased", or namespaced under a user or organization name, like "dbmdz/bert-base-german-cased";
        - a path to a directory containing a feature extractor file saved using the SequenceFeatureExtractor.save_pretrained method, e.g. "./my_model_directory/";
        - a path or url to a saved feature extractor JSON file, e.g. "./my_model_directory/preprocessor_config.json".
    **kwargs: additional keyword arguments passed along to both FeatureExtractionMixin.from_pretrained and PreTrainedTokenizer.from_pretrained.

register_for_auto_class: registers this class with a given auto class. This should only be used for custom feature extractors, as the ones in the library are already mapped with AutoProcessor. This API is experimental and may have some slight breaking changes in the next releases.
    auto_class (str or type, optional, defaults to AutoProcessor): the auto class to register this new feature extractor with.
import os import warnings from pathlib import Path from typing import Optional, Union from .dynamic_module_utils import custom_object_save from .tokenization_utils_base import PreTrainedTokenizerBase from .utils import PushToHubMixin, copy_func, direct_transformers_import, logging logger = logging.get_logger(__name__) transformers_module = direct_transformers_import(Path(__file__).parent) AUTO_TO_BASE_CLASS_MAPPING = { "AutoTokenizer": "PreTrainedTokenizerBase", "AutoFeatureExtractor": "FeatureExtractionMixin", "AutoImageProcessor": "ImageProcessingMixin", } class ProcessorMixin(PushToHubMixin): attributes = ["feature_extractor", "tokenizer"] feature_extractor_class = None tokenizer_class = None _auto_class = None def __init__(self, *args, **kwargs): for key in kwargs: if key not in self.attributes: raise TypeError(f"Unexpected keyword argument {key}.") for arg, attribute_name in zip(args, self.attributes): if attribute_name in kwargs: raise TypeError(f"Got multiple values for argument {attribute_name}.") else: kwargs[attribute_name] = arg if len(kwargs) != len(self.attributes): raise ValueError( f"This processor requires {len(self.attributes)} arguments: {', '.join(self.attributes)}. Got " f"{len(args)} arguments instead." ) for attribute_name, arg in kwargs.items(): class_name = getattr(self, f"{attribute_name}_class") class_name = AUTO_TO_BASE_CLASS_MAPPING.get(class_name, class_name) if isinstance(class_name, tuple): proper_class = tuple(getattr(transformers_module, n) for n in class_name if n is not None) else: proper_class = getattr(transformers_module, class_name) if not isinstance(arg, proper_class): raise ValueError( f"Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected." ) setattr(self, attribute_name, arg) def __repr__(self): attributes_repr = [f"- {name}: {repr(getattr(self, name))}" for name in self.attributes] attributes_repr = "\n".join(attributes_repr) return f"{self.__class__.__name__}:\n{attributes_repr}" def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs): use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
) kwargs["token"] = use_auth_token os.makedirs(save_directory, exist_ok=True) if push_to_hub: commit_message = kwargs.pop("commit_message", None) repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) repo_id = self._create_repo(repo_id, **kwargs) files_timestamps = self._get_files_timestamps(save_directory) if self._auto_class is not None: attrs = [getattr(self, attribute_name) for attribute_name in self.attributes] configs = [(a.init_kwargs if isinstance(a, PreTrainedTokenizerBase) else a) for a in attrs] custom_object_save(self, save_directory, config=configs) for attribute_name in self.attributes: attribute = getattr(self, attribute_name) if hasattr(attribute, "_set_processor_class"): attribute._set_processor_class(self.__class__.__name__) attribute.save_pretrained(save_directory) if self._auto_class is not None: for attribute_name in self.attributes: attribute = getattr(self, attribute_name) if isinstance(attribute, PreTrainedTokenizerBase): del attribute.init_kwargs["auto_map"] if push_to_hub: self._upload_modified_files( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get("token"), ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path: Union[str, os.PathLike], cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, local_files_only: bool = False, token: Optional[Union[str, bool]] = None, revision: str = "main", **kwargs, ): r kwargs["cache_dir"] = cache_dir kwargs["force_download"] = force_download kwargs["local_files_only"] = local_files_only kwargs["revision"] = revision use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if token is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) token = use_auth_token if token is not None: kwargs["token"] = token args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(*args) @classmethod def register_for_auto_class(cls, auto_class="AutoProcessor"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class @classmethod def _get_arguments_from_pretrained(cls, pretrained_model_name_or_path, **kwargs): args = [] for attribute_name in cls.attributes: class_name = getattr(cls, f"{attribute_name}_class") if isinstance(class_name, tuple): classes = tuple(getattr(transformers_module, n) if n is not None else None for n in class_name) use_fast = kwargs.get("use_fast", True) if use_fast and classes[1] is not None: attribute_class = classes[1] else: attribute_class = classes[0] else: attribute_class = getattr(transformers_module, class_name) args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs)) return args @property def model_input_names(self): first_attribute = getattr(self, self.attributes[0]) return getattr(first_attribute, "model_input_names", None) ProcessorMixin.push_to_hub = copy_func(ProcessorMixin.push_to_hub) if ProcessorMixin.push_to_hub.__doc__ is not None: ProcessorMixin.push_to_hub.__doc__ = ProcessorMixin.push_to_hub.__doc__.format( object="processor", object_class="AutoProcessor", object_files="processor files" )
Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

softmax_backward_data: a function that calls the internal _softmax_backward_data PyTorch method and adjusts the arguments according to the torch version detected.

prune_linear_layer: prunes a linear layer to keep only entries in index. Used to remove heads.
    layer (torch.nn.Linear): the layer to prune. index (torch.LongTensor): the indices to keep in the layer. dim (int, optional, defaults to 0): the dimension on which to keep the indices.
    Returns torch.nn.Linear: the pruned layer as a new layer with requires_grad=True.

Conv1D: 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed.
    nf (int): the number of output features. nx (int): the number of input features.

prune_conv1d_layer: prunes a Conv1D layer to keep only entries in index. A Conv1D works as a linear layer (see e.g. BERT) but the weights are transposed. Used to remove heads.
    layer (pytorch_utils.Conv1D): the layer to prune. index (torch.LongTensor): the indices to keep in the layer. dim (int, optional, defaults to 1): the dimension on which to keep the indices.
    Returns pytorch_utils.Conv1D: the pruned layer as a new layer with requires_grad=True.

prune_layer: prunes a Conv1D or linear layer to keep only entries in index. Used to remove heads.
    layer (Union[torch.nn.Linear, Conv1D]): the layer to prune. index (torch.LongTensor): the indices to keep in the layer. dim (int, optional): the dimension on which to keep the indices.
    Returns torch.nn.Linear or pytorch_utils.Conv1D: the pruned layer as a new layer with requires_grad=True.

apply_chunking_to_forward: chunks the input_tensors into smaller input tensor parts of size chunk_size over the dimension chunk_dim, then applies a layer forward_fn to each chunk independently to save memory. If the forward_fn is independent across the chunk_dim, this function will yield the same result as directly applying forward_fn to input_tensors.
    forward_fn (Callable[..., torch.Tensor]): the forward function of the model.
    chunk_size (int): the chunk size of a chunked tensor (num_chunks = len(input_tensors[0]) / chunk_size).
    chunk_dim (int): the dimension over which the input_tensors should be chunked.
    input_tensors (Tuple[torch.Tensor]): the input tensors of forward_fn which will be chunked.
    Returns torch.Tensor: a tensor with the same shape as forward_fn would have given if applied directly.
    Example: rename the usual forward fn to forward_chunk, then implement a chunked forward that returns apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states).
    Implementation notes: inspect.signature exists since Python 3.5 and is a Python method, so there is no backward-compatibility problem; the input tensors are chunked into tuples, forward_fn is applied to every tuple, and the outputs are concatenated along the same dimension.

find_pruneable_heads_and_indices: finds the heads and their indices, taking already_pruned_heads into account.
    heads (List[int]): list of the indices of heads to prune. n_heads (int): the number of heads in the model. head_size (int): the size of each head. already_pruned_heads (Set[int]): a set of already pruned heads.
    Returns Tuple[Set[int], torch.LongTensor]: a tuple with the indices of heads to prune (taking already_pruned_heads into account) and the indices of rows/columns to keep in the layer weight. Heads are converted to a set and already pruned heads are removed; for each head, the number of pruned heads before it is computed and the index is moved accordingly.

meshgrid: wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument (reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html).

id_tensor_storage: unique identifier to a tensor's storage. Multiple different tensors can share the same underlying storage; for example, "meta" tensors all share the same storage and thus their identifiers will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime; two tensor storages with non-overlapping lifetimes may have the same id. Note: XLA tensors don't have storage, so some other unique id is used to distinguish them; an XLA tensor must have been created using torch_xla's device, so the following torch_xla import is safe.
import inspect from typing import Callable, List, Optional, Set, Tuple, Union import torch from packaging import version from safetensors.torch import storage_ptr, storage_size from torch import nn from .utils import is_torch_tpu_available, logging ALL_LAYERNORM_LAYERS = [nn.LayerNorm] logger = logging.get_logger(__name__) parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) is_torch_greater_or_equal_than_2_1 = parsed_torch_version_base >= version.parse("2.1") is_torch_greater_or_equal_than_2_0 = parsed_torch_version_base >= version.parse("2.0") is_torch_greater_or_equal_than_1_13 = parsed_torch_version_base >= version.parse("1.13") is_torch_greater_or_equal_than_1_12 = parsed_torch_version_base >= version.parse("1.12") is_torch_greater_or_equal_than_1_11 = parsed_torch_version_base >= version.parse("1.11") is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") is_torch_1_8_0 = parsed_torch_version_base == version.parse("1.8.0") def softmax_backward_data(parent, grad_output, output, dim, self): from torch import _softmax_backward_data if is_torch_less_than_1_11: return _softmax_backward_data(grad_output, output, parent.dim, self) else: return _softmax_backward_data(grad_output, output, parent.dim, self.dtype) def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear: index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer class Conv1D(nn.Module): def __init__(self, nf, nx): super().__init__() self.nf = nf self.weight = nn.Parameter(torch.empty(nx, nf)) self.bias = nn.Parameter(torch.zeros(nf)) nn.init.normal_(self.weight, std=0.02) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(size_out) return x def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_layer( layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None ) -> Union[nn.Linear, Conv1D]: if isinstance(layer, nn.Linear): return prune_linear_layer(layer, index, dim=0 if dim is None else dim) elif isinstance(layer, Conv1D): return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) else: raise ValueError(f"Can't prune layer of class {layer.__class__}") def apply_chunking_to_forward( forward_fn: Callable[..., torch.Tensor], chunk_size: int, 
chunk_dim: int, *input_tensors ) -> torch.Tensor: assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors" num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) if num_args_in_forward_chunk_fn != len(input_tensors): raise ValueError( f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input " "tensors are given" ) if chunk_size > 0: tensor_shape = input_tensors[0].shape[chunk_dim] for input_tensor in input_tensors: if input_tensor.shape[chunk_dim] != tensor_shape: raise ValueError( f"All input tenors have to be of the same shape: {tensor_shape}, " f"found shape {input_tensor.shape[chunk_dim]}" ) if input_tensors[0].shape[chunk_dim] % chunk_size != 0: raise ValueError( f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk " f"size {chunk_size}" ) num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors) output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)) return torch.cat(output_chunks, dim=chunk_dim) return forward_fn(*input_tensors) def find_pruneable_heads_and_indices( heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] ) -> Tuple[Set[int], torch.LongTensor]: mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads for head in heads: head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: torch.LongTensor = torch.arange(len(mask))[mask].long() return heads, index def meshgrid( *tensors: Union[torch.Tensor, List[torch.Tensor]], indexing: Optional[str] = None ) -> Tuple[torch.Tensor, ...]: return torch.meshgrid(*tensors, indexing=indexing) def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: if tensor.device.type == "xla" and is_torch_tpu_available(): import torch_xla unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) else: unique_id = storage_ptr(tensor) return tensor.device, unique_id, storage_size(tensor)
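To make the chunking contract concrete, here is a small, hedged sketch (layer sizes and chunk size are arbitrary illustration values) showing that `apply_chunking_to_forward` matches a direct forward pass when the forward function is independent across the chunked dimension.

```python
# Hedged sketch: chunking a feed-forward layer over the sequence dimension.
import torch
from torch import nn
from transformers.pytorch_utils import apply_chunking_to_forward

ffn = nn.Linear(16, 16)
hidden_states = torch.randn(2, 8, 16)  # (batch, seq_len, hidden)

def forward_chunk(chunk):
    return ffn(chunk)

# chunk_size=4 splits the seq_len=8 dimension (chunk_dim=1) into two chunks of 4
chunked = apply_chunking_to_forward(forward_chunk, 4, 1, hidden_states)
direct = ffn(hidden_states)
print(torch.allclose(chunked, direct))  # True: same output, lower peak activation memory
```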
The fn_index is necessary to indicate to Gradio that we will use the "run" method of the Space. previous_pr looks into the current repo's open PRs to see if a PR for safetensors was already opened; if so, it returns it. The PR is checked to have been opened by the bot and not by another user, so as to prevent security breaches. There is an additional HEAD call (to determine whether the converted weights are sharded) that could be removed if we could infer sharded/non-sharded from the PR description.
import json import uuid from typing import Optional import requests from huggingface_hub import Discussion, HfApi, get_repo_discussions from .utils import cached_file, logging logger = logging.get_logger(__name__) def previous_pr(api: HfApi, model_id: str, pr_title: str, token: str) -> Optional["Discussion"]: main_commit = api.list_repo_commits(model_id, token=token)[0].commit_id for discussion in get_repo_discussions(repo_id=model_id, token=token): if discussion.title == pr_title and discussion.status == "open" and discussion.is_pull_request: commits = api.list_repo_commits(model_id, revision=discussion.git_reference, token=token) if main_commit == commits[1].commit_id: return discussion return None def spawn_conversion(token: str, private: bool, model_id: str): logger.info("Attempting to convert .bin model on the fly to safetensors.") safetensors_convert_space_url = "https://safetensors-convert.hf.space" sse_url = f"{safetensors_convert_space_url}/queue/join" sse_data_url = f"{safetensors_convert_space_url}/queue/data" hash_data = {"fn_index": 1, "session_hash": str(uuid.uuid4())} def start(_sse_connection, payload): for line in _sse_connection.iter_lines(): line = line.decode() if line.startswith("data:"): resp = json.loads(line[5:]) logger.debug(f"Safetensors conversion status: {resp['msg']}") if resp["msg"] == "queue_full": raise ValueError("Queue is full! Please try again.") elif resp["msg"] == "send_data": event_id = resp["event_id"] response = requests.post( sse_data_url, stream=True, params=hash_data, json={"event_id": event_id, **payload, **hash_data}, ) response.raise_for_status() elif resp["msg"] == "process_completed": return with requests.get(sse_url, stream=True, params=hash_data) as sse_connection: data = {"data": [model_id, private, token]} try: logger.debug("Spawning safetensors automatic conversion.") start(sse_connection, data) except Exception as e: logger.warning(f"Error during conversion: {repr(e)}") def get_conversion_pr_reference(api: HfApi, model_id: str, **kwargs): private = api.model_info(model_id).private logger.info("Attempting to create safetensors variant") pr_title = "Adding `safetensors` variant of this model" token = kwargs.get("token") pr = previous_pr(api, model_id, pr_title, token=token) if pr is None or (not private and pr.author != "SFConvertBot"): spawn_conversion(token, private, model_id) pr = previous_pr(api, model_id, pr_title, token=token) else: logger.info("Safetensors PR exists") sha = f"refs/pr/{pr.num}" return sha def auto_conversion(pretrained_model_name_or_path: str, **cached_file_kwargs): api = HfApi(token=cached_file_kwargs.get("token")) sha = get_conversion_pr_reference(api, pretrained_model_name_or_path, **cached_file_kwargs) if sha is None: return None, None cached_file_kwargs["revision"] = sha del cached_file_kwargs["_commit_hash"] sharded = api.file_exists( pretrained_model_name_or_path, "model.safetensors.index.json", revision=sha, token=cached_file_kwargs.get("token"), ) filename = "model.safetensors.index.json" if sharded else "model.safetensors" resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) return resolved_archive_file, sha, sharded
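For orientation, a hedged sketch of how these helpers might be driven; it requires network access and a valid Hub token, and the token and model id below are placeholders rather than values taken from this file.

```python
# Hedged sketch; the token and model id are placeholders and require a real Hub setup.
from huggingface_hub import HfApi
from transformers.safetensors_conversion import get_conversion_pr_reference

token = "hf_xxx"                       # placeholder token
model_id = "some-user/bin-only-model"  # placeholder: a repo that only has .bin weights

api = HfApi(token=token)
revision = get_conversion_pr_reference(api, model_id, token=token)
print(revision)  # e.g. "refs/pr/1": the PR revision that holds the safetensors weights
```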
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from .trainer_sm import SageMakerTrainer from .training_args_sm import SageMakerTrainingArguments, is_sagemaker_dp_enabled
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import warnings from ..trainer import Trainer from ..utils import logging logger = logging.get_logger(__name__) class SageMakerTrainer(Trainer): def __init__(self, args=None, **kwargs): warnings.warn( "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` " "instead.", FutureWarning, ) super().__init__(args=args, **kwargs)
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

TODO: should be moved to `utils` after the refactoring of SageMakerTrainer.

is_sagemaker_model_parallel_available: gets the SageMaker-specific model-parallel parameters from the smp_options variable, parses them and checks that the field "partitions" is included (it is required for model parallel); then gets the SageMaker-specific framework parameters from the mpi_options variable, parses them and checks the field "sagemaker_distributed_dataparallel_enabled"; lastly, checks that the smdistributed module is present.

_setup_devices: if n_gpu is > 1, nn.DataParallel is used. If you only want to use a specific subset of GPUs, use CUDA_VISIBLE_DEVICES=0. CUDA is explicitly set to the first (index 0) CUDA device; otherwise set_device will trigger an error that a device index is missing. Index 0 takes into account the GPUs available in the environment, so CUDA_VISIBLE_DEVICES=1,2 with cuda:0 will use the first GPU in that env, i.e. GPU#1. Sometimes the line in the post-init has not been run before we end up here, so we just check that we are not at the default value. Here torch.distributed is used: it initializes the distributed backend, which will take care of synchronizing nodes/GPUs.
import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging logger = logging.get_logger(__name__) def is_sagemaker_model_parallel_available(): smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False return importlib.util.find_spec("smdistributed") is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class SageMakerTrainingArguments(TrainingArguments): mp_parameters: str = field( default="", metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"}, ) def __post_init__(self): super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead.", FutureWarning, ) @cached_property def _setup_devices(self) -> "torch.device": logger.info("PyTorch: setting up devices") if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: device = torch.device("cpu") self._n_gpu = 0 elif is_sagemaker_model_parallel_available(): local_rank = smp.local_rank() device = torch.device("cuda", local_rank) self._n_gpu = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta) self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK")) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 elif self.local_rank == -1: device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self._n_gpu = torch.cuda.device_count() else: if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 if device.type == "cuda": torch.cuda.set_device(device) return device @property def world_size(self): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def place_model_on_device(self): return not is_sagemaker_model_parallel_available() @property def _no_sync_in_gradient_accumulation(self): return False
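As a hedged illustration of what the detection helper looks for, the snippet below fakes the environment variables the SageMaker launcher would normally set; the JSON payloads are illustrative, not an exhaustive description of the real launcher output.

```python
# Hedged sketch: fake the launcher-provided environment so the detection helper fires.
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})

# With both variables set and the `smdistributed` package installed,
# is_sagemaker_model_parallel_available() returns True; otherwise it returns False.
```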
Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

shape_list: deals with dynamic shape in TensorFlow cleanly.
    tensor (tf.Tensor or np.ndarray): the tensor we want the shape of.
    Returns List[int]: the shape of the tensor as a list.

stable_softmax: stable wrapper that returns the same output as tf.nn.softmax but that works reliably with XLA on CPU. It is meant as a workaround for https://github.com/tensorflow/tensorflow/issues/55682 and will be removed after it gets fixed. The arguments and outputs are the same as tf.nn.softmax, relying on the fact that softmax(x) = softmax(x + c) (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).
    logits (tf.Tensor): must be one of the following types: half, float32, float64. axis (int, optional): the dimension softmax would be performed on; the default is -1, which indicates the last dimension. name (str, optional): a name for the operation.
    Returns tf.Tensor: a tensor with the same type and shape as logits.
    TODO: when the issue linked above gets sorted, add a check on the TF version here and use the original function if it has the fix; after we drop support for unfixed versions, remove this function.

functional_layernorm: a very simplified functional layernorm, designed to duplicate the functionality of PyTorch's nn.functional.layer_norm when it is needed to port models in Transformers. It gets the mean and variance on the axis to be normalized, reshapes scale and weight to have the same rank as the inputs (but with 1 dimensions on every dimension except `axis`), and computes layer normalization using the batch_normalization function.

flatten: replicates the behavior of torch.flatten in TF. If end_dim or start_dim is negative, they are counted from the end.

invert_attention_mask: inverts an attention mask (e.g. switches 0. and 1.); stray NumPy inputs are converted to tensors first.
    encoder_attention_mask (torch.Tensor): an attention mask.
    Returns tf.Tensor: the inverted attention mask.
    T5 has a mask that can compare sequence ids, which could be simulated here with a transposition of the extended attention mask (cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270).

check_embeddings_within_bounds: tf.gather, on which TF embedding layers are based, won't check positive out-of-bound indices on GPU, returning zeros instead. This function adds a check against that dangerous silent behavior.
    tensor (tf.Tensor): the tensor of indices to check. embed_dim (int): the embedding dimension. tensor_name (str, optional): the name of the tensor to use in the error message.

save_attributes_to_hdf5_group: saves attributes (data) of the specified name into the HDF5 group. This method deals with an inherent problem of HDF5 files, which are not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes. Copied from Keras to Transformers to avoid versioning issues. It first checks that no single item in data is larger than HDF5_OBJECT_HEADER_LIMIT, because in that case even chunking the array would not make saving possible; expecting this to never be true, the chunking loop will then never loop forever thanks to that test.
    group: a pointer to an HDF5 group. name: the name of the attributes to save. data: attributes data to store.
    Raises RuntimeError: if any single attribute is too large to be saved.

load_attributes_from_hdf5_group: loads attributes of the specified name from the HDF5 group, dealing with the same HDF5_OBJECT_HEADER_LIMIT problem. Copied from Keras to Transformers to avoid versioning issues.
    group: a pointer to an HDF5 group. name: the name of the attributes to load.
    Returns: the attributes data.

expand_1d: expands 1-dimensional tensor(s) into 2-dimensional tensor(s). Copied from Keras to here to avoid versioning issues.
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging logger = logging.get_logger(__name__) def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: if isinstance(tensor, np.ndarray): return list(tensor.shape) dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor: return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name) def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1): if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int): raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.") mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True) if axis != -1: shape = [1] * inputs.shape.rank shape[axis] = shape_list(inputs)[axis] weight = tf.reshape(weight, shape) bias = tf.reshape(bias, shape) outputs = tf.nn.batch_normalization( inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, ) return outputs def flatten(input, start_dim=0, end_dim=-1): if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input in_shape = tf.shape(input) flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1]) out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0) return tf.reshape(input, out_shape) def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor: if not isinstance(encoder_attention_mask, tf.Tensor): encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask) if encoder_attention_mask.shape.rank == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] encoder_extended_attention_mask = ( tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None: tf.debugging.assert_less( tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=( f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding " f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time." 
), ) def save_attributes_to_hdf5_group(group, name, data): HDF5_OBJECT_HEADER_LIMIT = 64512 bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT] if bad_attributes: raise RuntimeError( "The following attributes cannot be saved to HDF5 file because " f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} " f"bytes: {bad_attributes}" ) data_npy = np.asarray(data) num_chunks = 1 chunked_data = np.array_split(data_npy, num_chunks) while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data): num_chunks += 1 chunked_data = np.array_split(data_npy, num_chunks) if num_chunks > 1: for chunk_id, chunk_data in enumerate(chunked_data): group.attrs["%s%d" % (name, chunk_id)] = chunk_data else: group.attrs[name] = data def load_attributes_from_hdf5_group(group, name): if name in group.attrs: data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]] else: data = [] chunk_id = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]] ) chunk_id += 1 return data def expand_1d(data): def _expand_single_1d_tensor(t): if isinstance(t, tf.Tensor) and t.shape.rank == 1: return tf.expand_dims(t, axis=-1) return t return tf.nest.map_structure(_expand_single_1d_tensor, data)
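A small, hedged sketch of `shape_list` in action (shapes are illustrative): it returns plain Python ints where the shape is static and scalar tensors where it is dynamic, which is what makes it safe inside traced functions.

```python
# Hedged sketch: shape_list mixes static ints and dynamic scalar tensors.
import tensorflow as tf
from transformers.tf_utils import shape_list

x = tf.zeros((3, 4))
print(shape_list(x))  # [3, 4] -- fully static, plain Python ints

@tf.function(input_signature=[tf.TensorSpec([None, 4], tf.float32)])
def collapse(t):
    batch, width = shape_list(t)           # batch is a dynamic scalar tensor, width is the int 4
    return tf.reshape(t, [batch * width])  # the mixed list is auto-packed into a shape tensor

print(collapse(x).shape)  # (12,)
```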
Copyright 2023 The HuggingFace Inc. team and 2018 Amazon.com, Inc. or its affiliates. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Time series distributional output classes and utilities.

AffineTransformed: mean returns the mean of the distribution, variance returns the variance of the distribution, and stddev returns the standard deviation of the distribution.

DistributionOutput:
    event_shape: shape of each individual event contemplated by the distributions that this object constructs.
    event_dim: number of event dimensions, i.e. the length of the event_shape tuple, of the distributions that this object constructs.
    value_in_support: a float that will have a valid numeric value when computing the log-loss of the corresponding distribution; by default 0.0. This value will be used when padding data series.
    get_parameter_projection: returns the parameter projection layer that maps the input to the appropriate parameters of the distribution.
    domain_map: converts arguments to the right shape and domain. The domain depends on the type of distribution, while the correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a distribution of the right event_shape.
    squareplus: helper to map inputs to the positive orthant by applying the square-plus operation (reference: https://twitter.com/jon_barron/status/1387167648669048833).

StudentTOutput: Student-T distribution output class. NormalOutput: Normal distribution output class. NegativeBinomialOutput: Negative Binomial distribution output class; its distribution method overwrites the parent class method because we cannot scale using the affine transformation (the negative binomial should return integers), so the parameters are scaled instead (see the scaling property of the Gamma distribution).
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class AffineTransformed(TransformedDistribution): def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0): self.scale = 1.0 if scale is None else scale self.loc = 0.0 if loc is None else loc super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)]) @property def mean(self): return self.base_dist.mean * self.scale + self.loc @property def variance(self): return self.base_dist.variance * self.scale**2 @property def stddev(self): return self.variance.sqrt() class ParameterProjection(nn.Module): def __init__( self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs ) -> None: super().__init__(**kwargs) self.args_dim = args_dim self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) self.domain_map = domain_map def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: params_unbounded = [proj(x) for proj in self.proj] return self.domain_map(*params_unbounded) class LambdaLayer(nn.Module): def __init__(self, function): super().__init__() self.function = function def forward(self, x, *args): return self.function(x, *args) class DistributionOutput: distribution_class: type in_features: int args_dim: Dict[str, int] def __init__(self, dim: int = 1) -> None: self.dim = dim self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim} def _base_distribution(self, distr_args): if self.dim == 1: return self.distribution_class(*distr_args) else: return Independent(self.distribution_class(*distr_args), 1) def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None, ) -> Distribution: distr = self._base_distribution(distr_args) if loc is None and scale is None: return distr else: return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim) @property def event_shape(self) -> Tuple: r return () if self.dim == 1 else (self.dim,) @property def event_dim(self) -> int: r return len(self.event_shape) @property def value_in_support(self) -> float: r return 0.0 def get_parameter_projection(self, in_features: int) -> nn.Module: r return ParameterProjection( in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), ) def domain_map(self, *args: torch.Tensor): r raise NotImplementedError() @staticmethod def squareplus(x: torch.Tensor) -> torch.Tensor: r return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0 class StudentTOutput(DistributionOutput): args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} distribution_class: type = StudentT @classmethod def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) df = 2.0 + cls.squareplus(df) return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) class NormalOutput(DistributionOutput): args_dim: Dict[str, int] = {"loc": 1, "scale": 1} distribution_class: type = Normal @classmethod def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps) return loc.squeeze(-1), scale.squeeze(-1) class NegativeBinomialOutput(DistributionOutput): args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} distribution_class: type = 
NegativeBinomial @classmethod def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): total_count = cls.squareplus(total_count) return total_count.squeeze(-1), logits.squeeze(-1) def _base_distribution(self, distr_args) -> Distribution: total_count, logits = distr_args if self.dim == 1: return self.distribution_class(total_count=total_count, logits=logits) else: return Independent(self.distribution_class(total_count=total_count, logits=logits), 1) def distribution( self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None ) -> Distribution: total_count, logits = distr_args if scale is not None: logits += scale.log() return self._base_distribution((total_count, logits))
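To see how these pieces are meant to be used together, here is a minimal sketch (the batch/sequence/feature sizes and variable names are illustrative assumptions, not part of the file above) that projects network features to Student-T parameters, builds the distribution with per-series affine scaling, and computes a negative log-likelihood:

```python
import torch

# Univariate target: project hidden features to (df, loc, scale).
dist_output = StudentTOutput(dim=1)
proj = dist_output.get_parameter_projection(in_features=32)

features = torch.randn(8, 24, 32)          # (batch, time, hidden) -- assumed shapes
distr_args = proj(features)                 # tuple of (df, loc, scale), each (8, 24)

# Optional per-series location/scale, handled by AffineTransformed internally.
loc = torch.zeros(8, 1)
scale = torch.ones(8, 1)
distribution = dist_output.distribution(distr_args, loc=loc, scale=scale)

target = torch.randn(8, 24)
nll = -distribution.log_prob(target).mean() # training loss
samples = distribution.sample()             # shape (8, 24)
```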
Copyright 2020 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0.

Tokenization classes for python (slow) tokenizers. For the fast tokenizers provided by HuggingFace's tokenizers library, see tokenization_utils_fast.py. Slow tokenizers are saved in a vocabulary plus three separate files.

Trie: a trie in python, built from the list of added tokens so that splitting on added tokens can be done in one pass over the text. Loose reference: https://en.wikipedia.org/wiki/Trie

`add(word)` passes over every character (UTF-8 char) of `word` and recursively adds it to the internal `data` trie representation; the special key `""` represents termination. The function is idempotent: adding the same word twice leaves the trie unchanged (the empty string is ignored).

```python
>>> trie = Trie()
>>> trie.add("Hello 友達")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
```

`split(text)` looks for the words added to the trie within `text` and returns the original string split along the boundaries of the words found, matching the longest possible word first.

```python
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
```

Inside `split`, indexes are counted to the left of characters (for "hello", index 0 is left of "h", index 1 between "h" and "e", index 5 right of "o"). `states` maps every possible match start index to a pointer into the trie for that partial match; several starts must be tracked at once because, if the trie contains "blowing" and "lower" and the input is "blower", the correct split is ["b", "lower"] (a short snippet illustrating this case follows these notes). `offsets` collects every index where a cut must be made (0 and len(text) are always included), and `skip` lets the single main loop jump over text already consumed by a completed match, keeping the algorithm O(n). When a partial match reaches the termination key, a lookahead tries to extend it so that the longest token wins (important for "extra_id_1" vs "extra_id_100"), while also considering earlier partial matches ("[CLS] L" must match "[CLS]" even if "L" is special). The algorithm is greedy: as soon as a match is committed, its start and end are stored in `offsets` and all partial matches are reset; partial matches whose next character no longer matches are dropped outside the inner loop (because of how python iteration works). Any partial match still sitting on a termination key at the end of the string is committed with end = len(text). `cut_text` then performs the actual splitting, adding the first and last parts of the string when needed and skipping the zero-width cuts produced by two consecutive matches.

Helper predicates: `_is_whitespace` checks whether a character is whitespace ("\t", "\n" and "\r" are technically control characters but are treated as whitespace since they are generally considered as such); `_is_control` checks for control characters, excluding those three; `_is_punctuation` treats all non-letter/number ASCII as punctuation, so characters such as "^", "$" and "`", which are not in the Unicode punctuation class, are treated as punctuation anyway for consistency; `_is_end_of_word` / `_is_start_of_word` check whether the last / first character of a text is a punctuation, control or whitespace character; `_insert_one_token_to_ordered_list` inserts a token into a sorted list only if it is not already present.

PreTrainedTokenizer is the base class for all slow tokenizers, inheriting from PreTrainedTokenizerBase. It handles the shared methods for tokenization and special tokens, the methods for downloading/caching/loading pretrained tokenizers, and adding tokens to the vocabulary. It also contains the added tokens in a unified way on top of all tokenizers, so the specific vocabulary-augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...) do not have to be handled separately. Notes on the implementation that follows:

- `__init__` first initializes the tokens trie and `_added_tokens_decoder` (overwritten when an `added_tokens_decoder` is passed, i.e. when loading from a saved tokenizer), then calls the parent `__init__`, and finally adds any special tokens that are not already part of the vocab at the end, in the same order as `self.SPECIAL_TOKENS_ATTRIBUTES` (following the fast tokenizers).
- `vocab_size` is the size of the base vocabulary without the added tokens; `__len__` returns the size of the full vocabulary with the added tokens, counting keys rather than values so that a hole in the vocab does not lead to new tokens being added at a wrong index.
- `added_tokens_encoder` / `added_tokens_decoder` expose the sorted added-token mappings; the decoder setter always raises an error on malformed input (for instance plain string keys) because users should define the behavior explicitly via `AddedToken`.
- `add_tokens` / `_add_tokens` add new tokens to the vocabulary with indices starting from the length of the current vocabulary; a token already in the vocab is simply marked as an `AddedToken`, which allows controlling the stripping and normalization of this token (not possible in `tokenizers`). Example:

```python
>>> # Let's see how to increase the vocabulary of Bert model and tokenizer
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertModel.from_pretrained("bert-base-uncased")
>>> num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
>>> print("We have added", num_added_toks, "tokens")
>>> # Note: resize_token_embeddings expects to receive the full size of the new vocabulary,
>>> # i.e., the length of the tokenizer.
>>> model.resize_token_embeddings(len(tokenizer))
```

- `num_special_tokens_to_add` returns the number of tokens added when encoding a sequence with special tokens; it encodes a dummy input to count them, so it is not efficient and should not be put inside a training loop.
- `tokenize` converts a string into a sequence of tokens (words for word-based vocabularies, sub-words for BPE/SentencePiece/WordPiece vocabularies), taking care of added tokens: the text is split on added tokens with the trie, non-special tokens are lowercased if requested (which may be slow), and the lstrip/rstrip/single_word options of each `AddedToken` are applied to the neighbouring pieces (e.g. in "this is something <special_token_1> else", rstrip means the special token eats the whitespace on its right) before `_tokenize` is called on the remaining non-empty chunks; `_tokenize` itself does not take care of added tokens.
- `convert_tokens_to_ids` / `convert_ids_to_tokens` convert between tokens and ids using the vocabulary and added tokens; `prepare_for_tokenization` performs any necessary transformation before tokenization and returns the unused kwargs; `get_special_tokens_mask` retrieves sequence ids from a token list that has no special tokens added (1 for a special token, 0 for a sequence token); `_batch_prepare_for_model` prepares sequences or pairs of sequences so they can be used by the model, padding in batch afterward and converting the whole batch to tensors at the end; `return_offsets_mapping` is only available for fast tokenizers; `_decode` builds strings separately for added tokens and byte-level tokens to avoid mixing byte-level and unicode (see https://github.com/huggingface/transformers/issues/1133).
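As a concrete illustration of the multiple-start bookkeeping described above, this small snippet (not part of the original file) uses the Trie class defined below; the expected output follows from the algorithm just described:

```python
trie = Trie()
trie.add("blowing")
trie.add("lower")

# "blowing" starts matching at index 0 but fails at "e"; the partial match that
# started at index 1 ("lower") completes, so the text is cut into ["b", "lower"].
print(trie.split("blower"))  # ['b', 'lower']
```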
import bisect import itertools import re import unicodedata from collections import OrderedDict from typing import Any, Dict, List, Optional, Tuple, Union, overload from .tokenization_utils_base import ( ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, INIT_TOKENIZER_DOCSTRING, AddedToken, BatchEncoding, EncodedInput, EncodedInputPair, PreTokenizedInput, PreTokenizedInputPair, PreTrainedTokenizerBase, TextInput, TextInputPair, TruncationStrategy, ) from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging logger = logging.get_logger(__name__) SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json" ADDED_TOKENS_FILE = "added_tokens.json" TOKENIZER_CONFIG_FILE = "tokenizer_config.json" class Trie: def __init__(self): self.data = {} self._tokens = set() def add(self, word: str): if not word: return self._tokens.add(word) ref = self.data for char in word: ref[char] = char in ref and ref[char] or {} ref = ref[char] ref[""] = 1 def split(self, text: str) -> List[str]: states = OrderedDict() offsets = [0] skip = 0 for current, current_char in enumerate(text): if skip and current < skip: continue to_remove = set() reset = False for start, trie_pointer in states.items(): if "" in trie_pointer: for lookstart, looktrie_pointer in states.items(): if lookstart > start: break elif lookstart < start: lookahead_index = current + 1 end = current + 1 else: lookahead_index = current end = current next_char = text[lookahead_index] if lookahead_index < len(text) else None if "" in looktrie_pointer: start = lookstart end = lookahead_index skip = lookahead_index while next_char in looktrie_pointer: looktrie_pointer = looktrie_pointer[next_char] lookahead_index += 1 if "" in looktrie_pointer: start = lookstart end = lookahead_index skip = lookahead_index if lookahead_index == len(text): break next_char = text[lookahead_index] offsets.append(start) offsets.append(end) reset = True break elif current_char in trie_pointer: trie_pointer = trie_pointer[current_char] states[start] = trie_pointer else: to_remove.add(start) if reset: states = {} else: for start in to_remove: del states[start] if current >= skip and current_char in self.data: states[current] = self.data[current_char] for start, trie_pointer in states.items(): if "" in trie_pointer: end = len(text) offsets.append(start) offsets.append(end) break return self.cut_text(text, offsets) def cut_text(self, text, offsets): offsets.append(len(text)) tokens = [] start = 0 for end in offsets: if start > end: logger.error( "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it" " anyway." 
) continue elif start == end: continue tokens.append(text[start:end]) start = end return tokens def _is_whitespace(char): if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): cp = ord(char) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def _is_end_of_word(text): last_char = text[-1] return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char)) def _is_start_of_word(text): first_char = text[0] return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char)) def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str): insertion_idx = bisect.bisect_left(token_list, new_token) if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token: return else: token_list.insert(insertion_idx, new_token) @add_end_docstrings(INIT_TOKENIZER_DOCSTRING) class PreTrainedTokenizer(PreTrainedTokenizerBase): def __init__(self, **kwargs): self.tokens_trie = Trie() if not hasattr(self, "_added_tokens_decoder"): self._added_tokens_decoder: Dict[int, AddedToken] = {} self._added_tokens_decoder.update(kwargs.pop("added_tokens_decoder", {})) self._added_tokens_encoder: Dict[str, int] = {k.content: v for v, k in self._added_tokens_decoder.items()} super().__init__(**kwargs) self._add_tokens( [token for token in self.all_special_tokens_extended if token not in self._added_tokens_encoder], special_tokens=True, ) self._decode_use_source_tokenizer = False @property def is_fast(self) -> bool: return False @property def vocab_size(self) -> int: raise NotImplementedError @property def added_tokens_encoder(self) -> Dict[str, int]: return {k.content: v for v, k in sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])} @property def added_tokens_decoder(self) -> Dict[int, AddedToken]: return dict(sorted(self._added_tokens_decoder.items(), key=lambda item: item[0])) @added_tokens_decoder.setter def added_tokens_decoder(self, value: Dict[int, Union[AddedToken, str]]) -> Dict[int, AddedToken]: for index, token in value.items(): if not isinstance(token, (str, AddedToken)) or not isinstance(index, int): raise ValueError( f"The provided `added_tokens_decoder` has an element of type {index.__class__, token.__class__}, should be a dict of {int, Union[AddedToken, str]}" ) self._added_tokens_decoder[index] = AddedToken(token) if isinstance(token, str) else token self._added_tokens_encoder[str(token)] = index def get_added_vocab(self) -> Dict[str, int]: return self._added_tokens_encoder def __len__(self): return len(set(self.get_vocab().keys())) def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: added_tokens = 0 if new_tokens is None: return added_tokens current_vocab = self.get_vocab().copy() new_idx = len(current_vocab) for token in new_tokens: if not isinstance(token, (str, AddedToken)): raise TypeError(f"Token {token} is not a string but a {type(token)}.") if str(token) == "": continue if isinstance(token, str): if token in self._added_tokens_encoder: continue else: is_special = token in self.all_special_tokens or special_tokens 
token = AddedToken( token, rstrip=False, lstrip=False, normalized=not is_special, special=is_special ) elif special_tokens: token.__setstate__({"special": True, "normalized": token.normalized}) if token in self._added_tokens_decoder: continue if not token.special and token.normalized and getattr(self, "do_lower_case", False): token.content = token.content.lower() if token.content not in current_vocab: token_index = new_idx + added_tokens current_vocab[token.content] = token_index added_tokens += 1 else: token_index = current_vocab[token.content] if token.special and str(token) not in self.all_special_tokens: self._additional_special_tokens.append(token) self._added_tokens_decoder[token_index] = token self._added_tokens_encoder[token.content] = token_index if self.verbose: logger.info(f"Adding {token} to the vocabulary") self._update_trie() return added_tokens def _update_trie(self, unique_no_split_tokens: Optional[str] = []): for token in self._added_tokens_decoder.values(): if token not in self.tokens_trie._tokens: self.tokens_trie.add(token.content) for token in unique_no_split_tokens: if token not in self.tokens_trie._tokens: self.tokens_trie.add(token) def num_special_tokens_to_add(self, pair: bool = False) -> int: token_ids_0 = [] token_ids_1 = [] return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) def tokenize(self, text: TextInput, **kwargs) -> List[str]: split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens) text, kwargs = self.prepare_for_tokenization(text, **kwargs) if kwargs: logger.warning(f"Keyword arguments {kwargs} not recognized.") if hasattr(self, "do_lower_case") and self.do_lower_case: escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)] escaped_special_toks += [ re.escape(s_tok.content) for s_tok in (self._added_tokens_decoder.values()) if not s_tok.special and s_tok.normalized ] pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)" text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text) if split_special_tokens: no_split_token = [] tokens = [text] else: no_split_token = self._added_tokens_encoder.keys() tokens = self.tokens_trie.split(text) for i, token in enumerate(tokens): if token in no_split_token: tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token], None) left = tokens[i - 1] if i > 0 else None right = tokens[i + 1] if i < len(tokens) - 1 else None if isinstance(tok_extended, AddedToken): if tok_extended.rstrip and right: tokens[i + 1] = right.lstrip() if tok_extended.lstrip and left: tokens[i - 1] = left.rstrip() if tok_extended.single_word and left and left[-1] != " ": tokens[i - 1] += token tokens[i] = "" elif tok_extended.single_word and right and right[0] != " ": tokens[i + 1] = token + tokens[i + 1] tokens[i] = "" else: raise ValueError( f"{tok_extended} cannot be tokenized because it was not properly added" f" to the tokenizer. 
This means that it is not an `AddedToken` but a {type(tok_extended)}" ) tokenized_text = [] for token in tokens: if not token: continue if token in no_split_token: tokenized_text.append(token) else: tokenized_text.extend(self._tokenize(token)) return tokenized_text def _tokenize(self, text, **kwargs): raise NotImplementedError def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: if tokens is None: return None if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) ids = [] for token in tokens: ids.append(self._convert_token_to_id_with_added_voc(token)) return ids def _convert_token_to_id_with_added_voc(self, token): if token is None: return None if token in self._added_tokens_encoder: return self._added_tokens_encoder[token] return self._convert_token_to_id(token) def _convert_token_to_id(self, token): raise NotImplementedError def _encode_plus( self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: if is_split_into_words: raise ValueError( f"Input {text} is not valid. Should be a string or a list/tuple of strings when" " `is_split_into_words=True`." ) else: raise ValueError( f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of" " integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast. 
" "More information on available tokenizers at " "https://github.com/huggingface/transformers/pull/2674" ) first_ids = get_input_ids(text) second_ids = get_input_ids(text_pair) if text_pair is not None else None return self.prepare_for_model( first_ids, pair_ids=second_ids, add_special_tokens=add_special_tokens, padding=padding_strategy.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, prepend_batch_axis=True, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, verbose=verbose, ) def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair], List[EncodedInput], List[EncodedInputPair], ], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, is_split_into_words: bool = False, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: def get_input_ids(text): if isinstance(text, str): tokens = self.tokenize(text, **kwargs) return self.convert_tokens_to_ids(tokens) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str): if is_split_into_words: tokens = list( itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text)) ) return self.convert_tokens_to_ids(tokens) else: return self.convert_tokens_to_ids(text) elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int): return text else: raise ValueError( "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers." ) if return_offsets_mapping: raise NotImplementedError( "return_offset_mapping is not available when using Python tokenizers. " "To use this feature, change your tokenizer to one deriving from " "transformers.PreTrainedTokenizerFast." 
) input_ids = [] for ids_or_pair_ids in batch_text_or_text_pairs: if not isinstance(ids_or_pair_ids, (list, tuple)): ids, pair_ids = ids_or_pair_ids, None elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)): ids, pair_ids = ids_or_pair_ids, None else: ids, pair_ids = ids_or_pair_ids first_ids = get_input_ids(ids) second_ids = get_input_ids(pair_ids) if pair_ids is not None else None input_ids.append((first_ids, second_ids)) batch_outputs = self._batch_prepare_for_model( input_ids, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=return_tensors, verbose=verbose, ) return BatchEncoding(batch_outputs) @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) def _batch_prepare_for_model( self, batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]], add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_length: bool = False, verbose: bool = True, ) -> BatchEncoding: batch_outputs = {} for first_ids, second_ids in batch_ids_pairs: outputs = self.prepare_for_model( first_ids, second_ids, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, stride=stride, pad_to_multiple_of=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose, ) for key, value in outputs.items(): if key not in batch_outputs: batch_outputs[key] = [] batch_outputs[key].append(value) batch_outputs = self.pad( batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) return batch_outputs def prepare_for_tokenization( self, text: str, is_split_into_words: bool = False, **kwargs ) -> Tuple[str, Dict[str, Any]]: return (text, kwargs) def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)) @overload def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str: ... 
@overload def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]: ... def convert_ids_to_tokens( self, ids: Union[int, List[int]], skip_special_tokens: bool = False ) -> Union[str, List[str]]: if isinstance(ids, int): if ids in self._added_tokens_decoder: return self._added_tokens_decoder[ids].content else: return self._convert_id_to_token(ids) tokens = [] for index in ids: index = int(index) if skip_special_tokens and index in self.all_special_ids: continue if index in self._added_tokens_decoder: tokens.append(self._added_tokens_decoder[index].content) else: tokens.append(self._convert_id_to_token(index)) return tokens def _convert_id_to_token(self, index: int) -> str: raise NotImplementedError def convert_tokens_to_string(self, tokens: List[str]) -> str: return " ".join(tokens) def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) legacy_added_tokens = set(self._added_tokens_encoder.keys()) - set(self.all_special_tokens) | { token for token in self.additional_special_tokens if self.convert_tokens_to_ids(token) >= self.vocab_size } sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in legacy_added_tokens: if current_sub_text: string = self.convert_tokens_to_string(current_sub_text) if len(string) > 0: sub_texts.append(string) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) if spaces_between_special_tokens: text = " ".join(sub_texts) else: text = "".join(sub_texts) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text
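To see how the hooks above interact, here is a minimal, hypothetical subclass with a toy whitespace vocabulary; the class name, vocabulary, and added token are made up for illustration and are not part of the library:

```python
class WhitespaceTokenizer(PreTrainedTokenizer):
    # Toy slow tokenizer: fixed vocab, whitespace splitting in _tokenize.
    def __init__(self, **kwargs):
        self.vocab = {"[UNK]": 0, "hello": 1, "world": 2}
        self.ids_to_tokens = {i: t for t, i in self.vocab.items()}
        super().__init__(unk_token="[UNK]", **kwargs)

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return text.split()

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab["[UNK]"])

    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index, "[UNK]")


tok = WhitespaceTokenizer()
tok.add_tokens(["<new_tok>"])                      # goes through _add_tokens and the trie
print(tok.tokenize("hello <new_tok> world"))       # ['hello', '<new_tok>', 'world']
print(tok.convert_tokens_to_ids(["hello", "<new_tok>"]))  # 'hello' from the base vocab, '<new_tok>' appended after it
```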
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0.
from typing import TYPE_CHECKING from ..utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _import_structure = { "agents": ["Agent", "AzureOpenAiAgent", "HfAgent", "LocalAgent", "OpenAiAgent"], "base": ["PipelineTool", "RemoteTool", "Tool", "launch_gradio_demo", "load_tool"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["document_question_answering"] = ["DocumentQuestionAnsweringTool"] _import_structure["image_captioning"] = ["ImageCaptioningTool"] _import_structure["image_question_answering"] = ["ImageQuestionAnsweringTool"] _import_structure["image_segmentation"] = ["ImageSegmentationTool"] _import_structure["speech_to_text"] = ["SpeechToTextTool"] _import_structure["text_classification"] = ["TextClassificationTool"] _import_structure["text_question_answering"] = ["TextQuestionAnsweringTool"] _import_structure["text_summarization"] = ["TextSummarizationTool"] _import_structure["text_to_speech"] = ["TextToSpeechTool"] _import_structure["translation"] = ["TranslationTool"] if TYPE_CHECKING: from .agents import Agent, AzureOpenAiAgent, HfAgent, LocalAgent, OpenAiAgent from .base import PipelineTool, RemoteTool, Tool, launch_gradio_demo, load_tool try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .document_question_answering import DocumentQuestionAnsweringTool from .image_captioning import ImageCaptioningTool from .image_question_answering import ImageQuestionAnsweringTool from .image_segmentation import ImageSegmentationTool from .speech_to_text import SpeechToTextTool from .text_classification import TextClassificationTool from .text_question_answering import TextQuestionAnsweringTool from .text_summarization import TextSummarizationTool from .text_to_speech import TextToSpeechTool from .translation import TranslationTool else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
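The `_LazyModule` indirection above means that importing `transformers.tools` does not import any of the tool implementations until one of them is actually accessed. A minimal sketch of what this looks like from the user side (assuming torch is available, so the tool classes are registered at all):

```python
import transformers.tools as tools

# Only the lightweight __init__ above has run so far; the attribute lookup below is
# what triggers the import of `.text_classification` through _LazyModule.
classifier_cls = tools.TextClassificationTool
print(classifier_cls.__module__)  # transformers.tools.text_classification
```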
Copyright 2023 HuggingFace Inc. Licensed under the Apache License, Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0.

Agent types. AgentType is the abstract class to be reimplemented to define types that can be returned by agents. These objects serve three purposes: they behave as the type they are meant to be (e.g. a string for text, a PIL.Image for images); they can be stringified (str(object)) to return a string describing the object; and they display correctly in ipython notebooks/colab/jupyter.

- AgentText is the text type returned by the agent; it behaves as a string.
- AgentImage is the image type returned by the agent; it behaves as a PIL.Image. `_ipython_display_` displays the type correctly in an ipython notebook (ipython, colab, jupyter); `to_raw` returns the raw version of the object (for an AgentImage, a PIL.Image); `to_string` returns the stringified version, i.e. a path to the serialized version of the image (there is likely something simpler than loading into an image and saving it again).
- AgentAudio is the audio type returned by the agent; `to_raw` returns the raw version of the object (a torch.Tensor) and `to_string` returns a path to the serialized version of the audio.
- In `handle_agent_outputs`, if the tool class has defined output types, the outputs are mapped directly according to that definition; if it has no defined output, they are mapped according to the runtime type of each value.
import os import pathlib import tempfile import uuid import numpy as np from ..utils import is_soundfile_availble, is_torch_available, is_vision_available, logging logger = logging.get_logger(__name__) if is_vision_available(): import PIL.Image from PIL import Image from PIL.Image import Image as ImageType else: ImageType = object if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf class AgentType: def __init__(self, value): self._value = value def __str__(self): return self.to_string() def to_raw(self): logger.error( "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable" ) return self._value def to_string(self) -> str: logger.error( "This is a raw AgentType of unknown type. Display in notebooks and string conversion will be unreliable" ) return str(self._value) class AgentText(AgentType, str): def to_raw(self): return self._value def to_string(self): return self._value class AgentImage(AgentType, ImageType): def __init__(self, value): super().__init__(value) if not is_vision_available(): raise ImportError("PIL must be installed in order to handle images.") self._path = None self._raw = None self._tensor = None if isinstance(value, ImageType): self._raw = value elif isinstance(value, (str, pathlib.Path)): self._path = value elif isinstance(value, torch.Tensor): self._tensor = value else: raise ValueError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") def _ipython_display_(self, include=None, exclude=None): from IPython.display import Image, display display(Image(self.to_string())) def to_raw(self): if self._raw is not None: return self._raw if self._path is not None: self._raw = Image.open(self._path) return self._raw def to_string(self): if self._path is not None: return self._path if self._raw is not None: directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + ".png") self._raw.save(self._path) return self._path if self._tensor is not None: array = self._tensor.cpu().detach().numpy() img = Image.fromarray((array * 255).astype(np.uint8)) directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + ".png") img.save(self._path) return self._path class AgentAudio(AgentType): def __init__(self, value, samplerate=16_000): super().__init__(value) if not is_soundfile_availble(): raise ImportError("soundfile must be installed in order to handle audio.") self._path = None self._tensor = None self.samplerate = samplerate if isinstance(value, (str, pathlib.Path)): self._path = value elif isinstance(value, torch.Tensor): self._tensor = value else: raise ValueError(f"Unsupported audio type: {type(value)}") def _ipython_display_(self, include=None, exclude=None): from IPython.display import Audio, display display(Audio(self.to_string(), rate=self.samplerate)) def to_raw(self): if self._tensor is not None: return self._tensor if self._path is not None: tensor, self.samplerate = sf.read(self._path) self._tensor = torch.tensor(tensor) return self._tensor def to_string(self): if self._path is not None: return self._path if self._tensor is not None: directory = tempfile.mkdtemp() self._path = os.path.join(directory, str(uuid.uuid4()) + ".wav") sf.write(self._path, self._tensor, samplerate=self.samplerate) return self._path AGENT_TYPE_MAPPING = {"text": AgentText, "image": AgentImage, "audio": AgentAudio} INSTANCE_TYPE_MAPPING = {str: AgentText} if is_vision_available(): INSTANCE_TYPE_MAPPING[PIL.Image] = AgentImage def 
handle_agent_inputs(*args, **kwargs): args = [(arg.to_raw() if isinstance(arg, AgentType) else arg) for arg in args] kwargs = {k: (v.to_raw() if isinstance(v, AgentType) else v) for k, v in kwargs.items()} return args, kwargs def handle_agent_outputs(outputs, output_types=None): if isinstance(outputs, dict): decoded_outputs = {} for i, (k, v) in enumerate(outputs.items()): if output_types is not None: if output_types[i] in AGENT_TYPE_MAPPING: decoded_outputs[k] = AGENT_TYPE_MAPPING[output_types[i]](v) else: decoded_outputs[k] = AgentType(v) else: for _k, _v in INSTANCE_TYPE_MAPPING.items(): if isinstance(v, _k): decoded_outputs[k] = _v(v) if k not in decoded_outputs: decoded_outputs[k] = AgentType(v) elif isinstance(outputs, (list, tuple)): decoded_outputs = type(outputs)() for i, v in enumerate(outputs): if output_types is not None: if output_types[i] in AGENT_TYPE_MAPPING: decoded_outputs.append(AGENT_TYPE_MAPPING[output_types[i]](v)) else: decoded_outputs.append(AgentType(v)) else: found = False for _k, _v in INSTANCE_TYPE_MAPPING.items(): if isinstance(v, _k): decoded_outputs.append(_v(v)) found = True if not found: decoded_outputs.append(AgentType(v)) else: if output_types[0] in AGENT_TYPE_MAPPING: decoded_outputs = AGENT_TYPE_MAPPING[output_types[0]](outputs) else: for _k, _v in INSTANCE_TYPE_MAPPING.items(): if isinstance(outputs, _k): return _v(outputs) return AgentType(outputs) return decoded_outputs
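The two helpers above form the tool boundary: `handle_agent_inputs` unwraps `AgentType` arguments back to raw values before a tool runs, and `handle_agent_outputs` wraps tool results into agent types afterwards. A minimal round-trip sketch (not part of the original file; assumes Pillow and torch are installed):

    import numpy as np
    import PIL.Image

    # A raw tool output: a plain PIL image (here just a black 64x64 square).
    raw_image = PIL.Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))

    # A result declared as "image" gets wrapped into an AgentImage...
    wrapped = handle_agent_outputs(raw_image, output_types=["image"])
    print(type(wrapped).__name__)   # AgentImage
    print(wrapped.to_string())      # path to a temporary .png serialization

    # ...and AgentType arguments are unwrapped to raw values before the next tool call.
    args, kwargs = handle_agent_inputs(wrapped, question="what is in the picture?")
    print(type(args[0]))            # <class 'PIL.Image.Image'>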
Base class for all agents, which contains the main API methods.

Args:
    chat_prompt_template (str, optional): Pass along your own prompt if you want to override the default template for the `chat` method. Can be the actual prompt template or a repo ID on the Hugging Face Hub; in that case the prompt should be in a file named `chat_prompt_template.txt` in the repo.
    run_prompt_template (str, optional): Pass along your own prompt if you want to override the default template for the `run` method. Can be the actual prompt template or a repo ID on the Hugging Face Hub; in that case the prompt should be in a file named `run_prompt_template.txt` in the repo.
    additional_tools (`Tool`, list of tools or dictionary with tool values, optional): Any additional tools to include on top of the default ones. If you pass along a tool with the same name as one of the default tools, that default tool will be overridden.

`toolbox`: get all tools currently available to the agent.

`set_stream(streamer)`: set the function used to stream results, which is `print` by default. `streamer` (callable) is the function to call when streaming results from the LLM.

`chat(task, *, return_code=False, remote=False, **kwargs)`: sends a new request to the agent in a chat; will use the previous ones in its history.
Args:
    task (str): The task to perform.
    return_code (bool, optional, defaults to False): Whether to just return code and not evaluate it.
    remote (bool, optional, defaults to False): Whether or not to use remote tools (inference endpoints) instead of local ones.
    kwargs (additional keyword arguments, optional): Any keyword argument to send to the agent when evaluating the code.
Example:

    from transformers import HfAgent

    agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
    agent.chat("Draw me a picture of rivers and lakes")
    agent.chat("Transform the picture so that there is a rock in there")

`prepare_for_new_chat()`: clears the history of prior calls to `chat()`.

`run(task, *, return_code=False, remote=False, **kwargs)`: sends a request to the agent. The arguments are the same as for `chat`.
Example:

    from transformers import HfAgent

    agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
    agent.run("Draw me a picture of rivers and lakes")

`generate_one(prompt, stop)`: this is the method to implement in your custom agent.

`generate_many(prompts, stop)`: override if you have a way to do batch generation faster than one by one.

OpenAiAgent: agent that uses the OpenAI API to generate code. Warning: the OpenAI models are used in generation mode, so even for the `chat()` API it's better to use models like "text-davinci-003" over the chat-GPT variant; proper support for chat-GPT models will come in a next version.
Args:
    model (str, optional, defaults to "text-davinci-003"): The name of the OpenAI model to use.
    api_key (str, optional): The API key to use. If unset, will look for the environment variable "OPENAI_API_KEY".
    chat_prompt_template, run_prompt_template, additional_tools: same as for the base class.
Example:

    from transformers import OpenAiAgent

    agent = OpenAiAgent(model="text-davinci-003", api_key=xxx)
    agent.run("Is the following `text` (in Spanish) positive or negative?", text="este es un API muy agradable")

AzureOpenAiAgent: agent that uses Azure OpenAI to generate code. See the official documentation (https://learn.microsoft.com/en-us/azure/cognitive-services/openai) to learn how to deploy an OpenAI model on Azure. The same warning as for `OpenAiAgent` applies.
Args:
    deployment_id (str): The name of the deployed Azure OpenAI model to use.
    api_key (str, optional): The API key to use. If unset, will look for the environment variable "AZURE_OPENAI_API_KEY".
    resource_name (str, optional): The name of your Azure OpenAI resource. If unset, will look for the environment variable "AZURE_OPENAI_RESOURCE_NAME".
    api_version (str, optional, defaults to "2022-12-01"): The API version to use for this agent.
    is_chat_model (bool, optional): Whether you are using a completion model or a chat model (see note above; chat models won't be as efficient). Will default to whether "gpt" is in the `deployment_id` or not.
    chat_prompt_template, run_prompt_template, additional_tools: same as for the base class.
Example:

    from transformers import AzureOpenAiAgent

    agent = AzureOpenAiAgent(deployment_id="Davinci-003", api_key=xxx, resource_name=yyy)
    agent.run("Is the following `text` (in Spanish) positive or negative?", text="este es un API muy agradable")

HfAgent: agent that uses an inference endpoint to generate code.
Args:
    url_endpoint (str): The name of the url endpoint to use.
    token (str, optional): The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
    chat_prompt_template, run_prompt_template, additional_tools: same as for the base class.
Example:

    from transformers import HfAgent

    agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
    agent.run("Is the following `text` (in Spanish) positive or negative?", text="este es un API muy agradable")

In `generate_one`, any trailing stop sequence returned by the backend is stripped from the result.

LocalAgent: agent that uses a local model and tokenizer to generate code.
Args:
    model (`PreTrainedModel`): The model to use for the agent.
    tokenizer (`PreTrainedTokenizer`): The tokenizer to use for the agent.
    chat_prompt_template, run_prompt_template, additional_tools: same as for the base class.
Example:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, LocalAgent

    checkpoint = "bigcode/starcoder"
    model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)

    agent = LocalAgent(model, tokenizer)
    agent.run("Draw me a picture of rivers and lakes.")

`from_pretrained(pretrained_model_name_or_path, **kwargs)`: convenience method to build a `LocalAgent` from a pretrained checkpoint.
Args:
    pretrained_model_name_or_path (str or os.PathLike): The name of a repo on the Hub or a local path to a folder containing both model and tokenizer.
    kwargs (Dict[str, Any], optional): Keyword arguments passed along to `PreTrainedModel.from_pretrained`.
Example:

    import torch
    from transformers import LocalAgent

    agent = LocalAgent.from_pretrained("bigcode/starcoder", device_map="auto", torch_dtype=torch.bfloat16)
    agent.run("Draw me a picture of rivers and lakes.")

StopSequenceCriteria: this class can be used to stop generation whenever a sequence of tokens is encountered.
Args:
    stop_sequences (str or List[str]): The sequence (or list of sequences) on which to stop execution.
    tokenizer: The tokenizer used to decode the model outputs.
import importlib.util import json import os import time from dataclasses import dataclass from typing import Dict import requests from huggingface_hub import HfFolder, hf_hub_download, list_spaces from ..models.auto import AutoTokenizer from ..utils import is_offline_mode, is_openai_available, is_torch_available, logging from .base import TASK_MAPPING, TOOL_CONFIG_FILE, Tool, load_tool, supports_remote from .prompts import CHAT_MESSAGE_PROMPT, download_prompt from .python_interpreter import evaluate logger = logging.get_logger(__name__) if is_openai_available(): import openai if is_torch_available(): from ..generation import StoppingCriteria, StoppingCriteriaList from ..models.auto import AutoModelForCausalLM else: StoppingCriteria = object _tools_are_initialized = False BASE_PYTHON_TOOLS = { "print": print, "range": range, "float": float, "int": int, "bool": bool, "str": str, } @dataclass class PreTool: task: str description: str repo_id: str HUGGINGFACE_DEFAULT_TOOLS = {} HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [ "image-transformation", "text-download", "text-to-image", "text-to-video", ] def get_remote_tools(organization="huggingface-tools"): if is_offline_mode(): logger.info("You are in offline mode, so remote tools are not available.") return {} spaces = list_spaces(author=organization) tools = {} for space_info in spaces: repo_id = space_info.id resolved_config_file = hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space") with open(resolved_config_file, encoding="utf-8") as reader: config = json.load(reader) task = repo_id.split("/")[-1] tools[config["name"]] = PreTool(task=task, description=config["description"], repo_id=repo_id) return tools def _setup_default_tools(): global HUGGINGFACE_DEFAULT_TOOLS global _tools_are_initialized if _tools_are_initialized: return main_module = importlib.import_module("transformers") tools_module = main_module.tools remote_tools = get_remote_tools() for task_name, tool_class_name in TASK_MAPPING.items(): tool_class = getattr(tools_module, tool_class_name) description = tool_class.description HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(task=task_name, description=description, repo_id=None) if not is_offline_mode(): for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB: found = False for tool_name, tool in remote_tools.items(): if tool.task == task_name: HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool found = True break if not found: raise ValueError(f"{task_name} is not implemented on the Hub.") _tools_are_initialized = True def resolve_tools(code, toolbox, remote=False, cached_tools=None): if cached_tools is None: resolved_tools = BASE_PYTHON_TOOLS.copy() else: resolved_tools = cached_tools for name, tool in toolbox.items(): if name not in code or name in resolved_tools: continue if isinstance(tool, Tool): resolved_tools[name] = tool else: task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id _remote = remote and supports_remote(task_or_repo_id) resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote) return resolved_tools def get_tool_creation_code(code, toolbox, remote=False): code_lines = ["from transformers import load_tool", ""] for name, tool in toolbox.items(): if name not in code or isinstance(tool, Tool): continue task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id line = f'{name} = load_tool("{task_or_repo_id}"' if remote: line += ", remote=True" line += ")" code_lines.append(line) return "\n".join(code_lines) + "\n" def clean_code_for_chat(result): lines = result.split("\n") idx = 0 while 
idx < len(lines) and not lines[idx].lstrip().startswith("```"): idx += 1 explanation = "\n".join(lines[:idx]).strip() if idx == len(lines): return explanation, None idx += 1 start_idx = idx while not lines[idx].lstrip().startswith("```"): idx += 1 code = "\n".join(lines[start_idx:idx]).strip() return explanation, code def clean_code_for_run(result): result = f"I will use the following {result}" explanation, code = result.split("Answer:") explanation = explanation.strip() code = code.strip() code_lines = code.split("\n") if code_lines[0] in ["```", "```py", "```python"]: code_lines = code_lines[1:] if code_lines[-1] == "```": code_lines = code_lines[:-1] code = "\n".join(code_lines) return explanation, code class Agent: def __init__(self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): _setup_default_tools() agent_name = self.__class__.__name__ self.chat_prompt_template = download_prompt(chat_prompt_template, agent_name, mode="chat") self.run_prompt_template = download_prompt(run_prompt_template, agent_name, mode="run") self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() self.log = print if additional_tools is not None: if isinstance(additional_tools, (list, tuple)): additional_tools = {t.name: t for t in additional_tools} elif not isinstance(additional_tools, dict): additional_tools = {additional_tools.name: additional_tools} replacements = {name: tool for name, tool in additional_tools.items() if name in HUGGINGFACE_DEFAULT_TOOLS} self._toolbox.update(additional_tools) if len(replacements) > 1: names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()]) logger.warning( f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}." ) elif len(replacements) == 1: name = list(replacements.keys())[0] logger.warning(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.") self.prepare_for_new_chat() @property def toolbox(self) -> Dict[str, Tool]: return self._toolbox def format_prompt(self, task, chat_mode=False): description = "\n".join([f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]) if chat_mode: if self.chat_history is None: prompt = self.chat_prompt_template.replace("<<all_tools>>", description) else: prompt = self.chat_history prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task) else: prompt = self.run_prompt_template.replace("<<all_tools>>", description) prompt = prompt.replace("<<prompt>>", task) return prompt def set_stream(self, streamer): self.log = streamer def chat(self, task, *, return_code=False, remote=False, **kwargs): prompt = self.format_prompt(task, chat_mode=True) result = self.generate_one(prompt, stop=["Human:", "====="]) self.chat_history = prompt + result.strip() + "\n" explanation, code = clean_code_for_chat(result) self.log(f"==Explanation from the agent==\n{explanation}") if code is not None: self.log(f"\n\n==Code generated by the agent==\n{code}") if not return_code: self.log("\n\n==Result==") self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) self.chat_state.update(kwargs) return evaluate(code, self.cached_tools, self.chat_state, chat_mode=True) else: tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) return f"{tool_code}\n{code}" def prepare_for_new_chat(self): self.chat_history = None self.chat_state = {} self.cached_tools = None def run(self, task, *, return_code=False, remote=False, **kwargs): prompt = self.format_prompt(task) result = self.generate_one(prompt, 
stop=["Task:"]) explanation, code = clean_code_for_run(result) self.log(f"==Explanation from the agent==\n{explanation}") self.log(f"\n\n==Code generated by the agent==\n{code}") if not return_code: self.log("\n\n==Result==") self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) return evaluate(code, self.cached_tools, state=kwargs.copy()) else: tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) return f"{tool_code}\n{code}" def generate_one(self, prompt, stop): raise NotImplementedError def generate_many(self, prompts, stop): return [self.generate_one(prompt, stop) for prompt in prompts] class OpenAiAgent(Agent): def __init__( self, model="text-davinci-003", api_key=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, ): if not is_openai_available(): raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") if api_key is None: api_key = os.environ.get("OPENAI_API_KEY", None) if api_key is None: raise ValueError( "You need an openai key to use `OpenAIAgent`. You can get one here: Get one here " "https://openai.com/api/`. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = " "xxx." ) else: openai.api_key = api_key self.model = model super().__init__( chat_prompt_template=chat_prompt_template, run_prompt_template=run_prompt_template, additional_tools=additional_tools, ) def generate_many(self, prompts, stop): if "gpt" in self.model: return [self._chat_generate(prompt, stop) for prompt in prompts] else: return self._completion_generate(prompts, stop) def generate_one(self, prompt, stop): if "gpt" in self.model: return self._chat_generate(prompt, stop) else: return self._completion_generate([prompt], stop)[0] def _chat_generate(self, prompt, stop): result = openai.chat.completions.create( model=self.model, messages=[{"role": "user", "content": prompt}], temperature=0, stop=stop, ) return result.choices[0].message.content def _completion_generate(self, prompts, stop): result = openai.Completion.create( model=self.model, prompt=prompts, temperature=0, stop=stop, max_tokens=200, ) return [answer["text"] for answer in result["choices"]] class AzureOpenAiAgent(Agent): def __init__( self, deployment_id, api_key=None, resource_name=None, api_version="2022-12-01", is_chat_model=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None, ): if not is_openai_available(): raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") self.deployment_id = deployment_id openai.api_type = "azure" if api_key is None: api_key = os.environ.get("AZURE_OPENAI_API_KEY", None) if api_key is None: raise ValueError( "You need an Azure openAI key to use `AzureOpenAIAgent`. If you have one, set it in your env with " "`os.environ['AZURE_OPENAI_API_KEY'] = xxx." ) else: openai.api_key = api_key if resource_name is None: resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None) if resource_name is None: raise ValueError( "You need a resource_name to use `AzureOpenAIAgent`. If you have one, set it in your env with " "`os.environ['AZURE_OPENAI_RESOURCE_NAME'] = xxx." 
) else: openai.api_base = f"https://{resource_name}.openai.azure.com" openai.api_version = api_version if is_chat_model is None: is_chat_model = "gpt" in deployment_id.lower() self.is_chat_model = is_chat_model super().__init__( chat_prompt_template=chat_prompt_template, run_prompt_template=run_prompt_template, additional_tools=additional_tools, ) def generate_many(self, prompts, stop): if self.is_chat_model: return [self._chat_generate(prompt, stop) for prompt in prompts] else: return self._completion_generate(prompts, stop) def generate_one(self, prompt, stop): if self.is_chat_model: return self._chat_generate(prompt, stop) else: return self._completion_generate([prompt], stop)[0] def _chat_generate(self, prompt, stop): result = openai.ChatCompletion.create( engine=self.deployment_id, messages=[{"role": "user", "content": prompt}], temperature=0, stop=stop, ) return result["choices"][0]["message"]["content"] def _completion_generate(self, prompts, stop): result = openai.Completion.create( engine=self.deployment_id, prompt=prompts, temperature=0, stop=stop, max_tokens=200, ) return [answer["text"] for answer in result["choices"]] class HfAgent(Agent): def __init__( self, url_endpoint, token=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None ): self.url_endpoint = url_endpoint if token is None: self.token = f"Bearer {HfFolder().get_token()}" elif token.startswith("Bearer") or token.startswith("Basic"): self.token = token else: self.token = f"Bearer {token}" super().__init__( chat_prompt_template=chat_prompt_template, run_prompt_template=run_prompt_template, additional_tools=additional_tools, ) def generate_one(self, prompt, stop): headers = {"Authorization": self.token} inputs = { "inputs": prompt, "parameters": {"max_new_tokens": 200, "return_full_text": False, "stop": stop}, } response = requests.post(self.url_endpoint, json=inputs, headers=headers) if response.status_code == 429: logger.info("Getting rate-limited, waiting a tiny bit before trying again.") time.sleep(1) return self._generate_one(prompt) elif response.status_code != 200: raise ValueError(f"Error {response.status_code}: {response.json()}") result = response.json()[0]["generated_text"] for stop_seq in stop: if result.endswith(stop_seq): return result[: -len(stop_seq)] return result class LocalAgent(Agent): def __init__(self, model, tokenizer, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): self.model = model self.tokenizer = tokenizer super().__init__( chat_prompt_template=chat_prompt_template, run_prompt_template=run_prompt_template, additional_tools=additional_tools, ) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, **kwargs) tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) return cls(model, tokenizer) @property def _model_device(self): if hasattr(self.model, "hf_device_map"): return list(self.model.hf_device_map.values())[0] for param in self.model.parameters(): return param.device def generate_one(self, prompt, stop): encoded_inputs = self.tokenizer(prompt, return_tensors="pt").to(self._model_device) src_len = encoded_inputs["input_ids"].shape[1] stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(stop, self.tokenizer)]) outputs = self.model.generate( encoded_inputs["input_ids"], max_new_tokens=200, stopping_criteria=stopping_criteria ) result = self.tokenizer.decode(outputs[0].tolist()[src_len:]) for stop_seq 
in stop: if result.endswith(stop_seq): result = result[: -len(stop_seq)] return result class StopSequenceCriteria(StoppingCriteria): def __init__(self, stop_sequences, tokenizer): if isinstance(stop_sequences, str): stop_sequences = [stop_sequences] self.stop_sequences = stop_sequences self.tokenizer = tokenizer def __call__(self, input_ids, scores, **kwargs) -> bool: decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences)
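A custom agent only has to implement `generate_one` (and optionally `generate_many`). A minimal sketch of such a subclass, not from the library: the `client` object with a `complete(prompt, max_new_tokens=...)` method is hypothetical and stands in for whatever text-generation backend you use; the stop-sequence trimming mirrors what the built-in agents do.

    class MyBackendAgent(Agent):
        """Toy example: plug any text-generation backend into the agent API."""

        def __init__(self, client, **kwargs):
            self.client = client  # hypothetical client, e.g. a wrapper around your own LLM server
            super().__init__(**kwargs)

        def generate_one(self, prompt, stop):
            # Ask the backend for a completion, then strip any trailing stop sequence,
            # as the built-in agents do.
            result = self.client.complete(prompt, max_new_tokens=200)
            for stop_seq in stop:
                if result.endswith(stop_seq):
                    result = result[: -len(stop_seq)]
            return result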
Document question answering tool. In `decode`, the first task start token is removed from the generated sequence.
import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class DocumentQuestionAnsweringTool(PipelineTool): default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa" description = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) name = "document_qa" pre_processor_class = AutoProcessor model_class = VisionEncoderDecoderModel inputs = ["image", "text"] outputs = ["text"] def __init__(self, *args, **kwargs): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*args, **kwargs) def encode(self, document: "Image", question: str): task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" prompt = task_prompt.replace("{user_input}", question) decoder_input_ids = self.pre_processor.tokenizer( prompt, add_special_tokens=False, return_tensors="pt" ).input_ids pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def forward(self, inputs): return self.model.generate( inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences def decode(self, outputs): sequence = self.pre_processor.batch_decode(outputs)[0] sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "") sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "") sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() sequence = self.pre_processor.token2json(sequence) return sequence["answer"]
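A short usage sketch for the tool defined above (the class is the one in the code just shown; the file path and question are illustrative, and Pillow must be installed):

    from PIL import Image

    document_qa = DocumentQuestionAnsweringTool()

    # `document` is an image of the page to query (illustrative path).
    document = Image.open("invoice_page_1.png").convert("RGB")
    answer = document_qa(document=document, question="What is the total amount due?")
    print(answer)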
from typing import TYPE_CHECKING from ..models.auto import AutoModelForVision2Seq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class ImageCaptioningTool(PipelineTool): default_checkpoint = "Salesforce/blip-image-captioning-base" description = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) name = "image_captioner" model_class = AutoModelForVision2Seq inputs = ["image"] outputs = ["text"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) super().__init__(*args, **kwargs) def encode(self, image: "Image"): return self.pre_processor(images=image, return_tensors="pt") def forward(self, inputs): return self.model.generate(**inputs) def decode(self, outputs): return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
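A short usage sketch for the captioning tool defined above (the image path is illustrative, and the caption in the comment only indicates the kind of output to expect):

    from PIL import Image

    captioner = ImageCaptioningTool()
    image = Image.open("photo.jpg")
    print(captioner(image))   # e.g. "a dog sitting on a wooden bench"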
from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class ImageQuestionAnsweringTool(PipelineTool): default_checkpoint = "dandelin/vilt-b32-finetuned-vqa" description = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." ) name = "image_qa" pre_processor_class = AutoProcessor model_class = AutoModelForVisualQuestionAnswering inputs = ["image", "text"] outputs = ["text"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) super().__init__(*args, **kwargs) def encode(self, image: "Image", question: str): return self.pre_processor(image, question, return_tensors="pt") def forward(self, inputs): with torch.no_grad(): return self.model(**inputs).logits def decode(self, outputs): idx = outputs.argmax(-1).item() return self.model.config.id2label[idx]
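A short usage sketch for the visual question answering tool defined above (image path and question are illustrative):

    from PIL import Image

    image_qa = ImageQuestionAnsweringTool()
    image = Image.open("photo.jpg")
    print(image_qa(image, "How many dogs are in the picture?"))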
import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class ImageSegmentationTool(PipelineTool): description = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. " "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) default_checkpoint = "CIDAS/clipseg-rd64-refined" name = "image_segmenter" model_class = CLIPSegForImageSegmentation inputs = ["image", "text"] outputs = ["image"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) super().__init__(*args, **kwargs) def encode(self, image: "Image", label: str): return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt") def forward(self, inputs): with torch.no_grad(): logits = self.model(**inputs).logits return logits def decode(self, outputs): array = outputs.cpu().detach().numpy() array[array <= 0] = 0 array[array > 0] = 1 return Image.fromarray((array * 255).astype(np.uint8))
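A short usage sketch for the segmentation tool defined above. Because the tool declares `outputs = ["image"]`, the result typically comes back wrapped as an `AgentImage` (see the agent types earlier in this file set), so it is unwrapped with `to_raw()` before saving; paths and label are illustrative:

    from PIL import Image

    segmenter = ImageSegmentationTool()
    image = Image.open("photo.jpg")

    mask = segmenter(image, label="dog")   # binary mask for the pixels matching the label
    mask.to_raw().save("dog_mask.png")     # unwrap to a plain PIL image before saving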
An error raised when the interpreter cannot evaluate a Python expression, due to syntax error or unsupported operations (`InterpretorError`).

`evaluate(code, tools, state=None, chat_mode=False)`: evaluate a Python expression using the content of the variables stored in a state, only allowing a given set of functions to be called. This function will recurse through the nodes of the tree provided.
Args:
    code (str): The code to evaluate.
    tools (Dict[str, Callable]): The functions that may be called during the evaluation. Any call to another function will fail with an `InterpretorError`.
    state (Dict[str, Any]): A dictionary mapping variable names to values. The state should contain the initial inputs but will be updated by this function to contain all variables as they are evaluated.
    chat_mode (bool, optional, defaults to False): Whether or not the function is called from `Agent.chat`.

`evaluate_ast(expression, state, tools)`: evaluate an abstract syntax tree using the content of the variables stored in a state, only allowing a given set of functions to be called. This function will recurse through the nodes of the tree provided.
Args:
    expression (ast.AST): The code to evaluate, as an abstract syntax tree.
    state (Dict[str, Any]): A dictionary mapping variable names to values. The state is updated if need be when the evaluation encounters assignments.
    tools (Dict[str, Callable]): The functions that may be called during the evaluation. Any call to another function will fail with an `InterpretorError`.

Supported node types: assignment (we evaluate the assignment, which should update the state, and return the variable assigned as it may be used to determine the final result), function call (we return the value of the function call), constant (just return the value), dict (evaluate all keys and values), expression (evaluate the content), for loop (execute the loop), formatted value, i.e. part of an f-string (evaluate the content and return it), if (execute the right branch), list (evaluate all elements), name (pick up the value in the state), subscript (return the value of the indexing). For now we refuse anything else; let's add things as we need them. TODO: deal with args.
import ast import difflib from collections.abc import Mapping from typing import Any, Callable, Dict class InterpretorError(ValueError): pass def evaluate(code: str, tools: Dict[str, Callable], state=None, chat_mode=False): try: expression = ast.parse(code) except SyntaxError as e: print("The code generated by the agent is not valid.\n", e) return if state is None: state = {} result = None for idx, node in enumerate(expression.body): try: line_result = evaluate_ast(node, state, tools) except InterpretorError as e: msg = f"Evaluation of the code stopped at line {idx} before the end because of the following error" if chat_mode: msg += ( f". Copy paste the following error message and send it back to the agent:\nI get an error: '{e}'" ) else: msg += f":\n{e}" print(msg) break if line_result is not None: result = line_result return result def evaluate_ast(expression: ast.AST, state: Dict[str, Any], tools: Dict[str, Callable]): if isinstance(expression, ast.Assign): return evaluate_assign(expression, state, tools) elif isinstance(expression, ast.Call): return evaluate_call(expression, state, tools) elif isinstance(expression, ast.Constant): return expression.value elif isinstance(expression, ast.Dict): keys = [evaluate_ast(k, state, tools) for k in expression.keys] values = [evaluate_ast(v, state, tools) for v in expression.values] return dict(zip(keys, values)) elif isinstance(expression, ast.Expr): return evaluate_ast(expression.value, state, tools) elif isinstance(expression, ast.For): return evaluate_for(expression, state, tools) elif isinstance(expression, ast.FormattedValue): return evaluate_ast(expression.value, state, tools) elif isinstance(expression, ast.If): return evaluate_if(expression, state, tools) elif hasattr(ast, "Index") and isinstance(expression, ast.Index): return evaluate_ast(expression.value, state, tools) elif isinstance(expression, ast.JoinedStr): return "".join([str(evaluate_ast(v, state, tools)) for v in expression.values]) elif isinstance(expression, ast.List): return [evaluate_ast(elt, state, tools) for elt in expression.elts] elif isinstance(expression, ast.Name): return evaluate_name(expression, state, tools) elif isinstance(expression, ast.Subscript): return evaluate_subscript(expression, state, tools) else: raise InterpretorError(f"{expression.__class__.__name__} is not supported.") def evaluate_assign(assign, state, tools): var_names = assign.targets result = evaluate_ast(assign.value, state, tools) if len(var_names) == 1: state[var_names[0].id] = result else: if len(result) != len(var_names): raise InterpretorError(f"Expected {len(var_names)} values but got {len(result)}.") for var_name, r in zip(var_names, result): state[var_name.id] = r return result def evaluate_call(call, state, tools): if not isinstance(call.func, ast.Name): raise InterpretorError( f"It is not permitted to evaluate other functions than the provided tools (tried to execute {call.func} of " f"type {type(call.func)}." ) func_name = call.func.id if func_name not in tools: raise InterpretorError( f"It is not permitted to evaluate other functions than the provided tools (tried to execute {call.func.id})." 
) func = tools[func_name] args = [evaluate_ast(arg, state, tools) for arg in call.args] kwargs = {keyword.arg: evaluate_ast(keyword.value, state, tools) for keyword in call.keywords} return func(*args, **kwargs) def evaluate_subscript(subscript, state, tools): index = evaluate_ast(subscript.slice, state, tools) value = evaluate_ast(subscript.value, state, tools) if isinstance(value, (list, tuple)): return value[int(index)] if index in value: return value[index] if isinstance(index, str) and isinstance(value, Mapping): close_matches = difflib.get_close_matches(index, list(value.keys())) if len(close_matches) > 0: return value[close_matches[0]] raise InterpretorError(f"Could not index {value} with '{index}'.") def evaluate_name(name, state, tools): if name.id in state: return state[name.id] close_matches = difflib.get_close_matches(name.id, list(state.keys())) if len(close_matches) > 0: return state[close_matches[0]] raise InterpretorError(f"The variable `{name.id}` is not defined.") def evaluate_condition(condition, state, tools): if len(condition.ops) > 1: raise InterpretorError("Cannot evaluate conditions with multiple operators") left = evaluate_ast(condition.left, state, tools) comparator = condition.ops[0] right = evaluate_ast(condition.comparators[0], state, tools) if isinstance(comparator, ast.Eq): return left == right elif isinstance(comparator, ast.NotEq): return left != right elif isinstance(comparator, ast.Lt): return left < right elif isinstance(comparator, ast.LtE): return left <= right elif isinstance(comparator, ast.Gt): return left > right elif isinstance(comparator, ast.GtE): return left >= right elif isinstance(comparator, ast.Is): return left is right elif isinstance(comparator, ast.IsNot): return left is not right elif isinstance(comparator, ast.In): return left in right elif isinstance(comparator, ast.NotIn): return left not in right else: raise InterpretorError(f"Operator not supported: {comparator}") def evaluate_if(if_statement, state, tools): result = None if evaluate_condition(if_statement.test, state, tools): for line in if_statement.body: line_result = evaluate_ast(line, state, tools) if line_result is not None: result = line_result else: for line in if_statement.orelse: line_result = evaluate_ast(line, state, tools) if line_result is not None: result = line_result return result def evaluate_for(for_loop, state, tools): result = None iterator = evaluate_ast(for_loop.iter, state, tools) for counter in iterator: state[for_loop.target.id] = counter for expression in for_loop.body: line_result = evaluate_ast(expression, state, tools) if line_result is not None: result = line_result return result
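A self-contained illustration of the restricted interpreter above: only the callables passed in `tools` may be invoked, `state` carries the initial inputs and is updated in place, and the value of the last evaluated line is returned.

    def add(a, b):
        return a + b

    code = 'total = add(x, 3)\nresult = f"total is {total}"\nresult'

    state = {"x": 4}
    print(evaluate(code, tools={"add": add}, state=state))   # -> total is 7
    print(state["total"])                                    # state was updated in place -> 7

    # A call to anything outside `tools` stops evaluation and prints an error message.
    evaluate("print(x)", tools={"add": add}, state={"x": 1})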
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
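A quick usage sketch: PipelineTool instances are callable, so a single call runs encode, forward and decode. The
silent 16 kHz array below is only a stand-in for real speech and is an illustrative assumption, not part of the tool.

import numpy as np

audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
transcriber = SpeechToTextTool()
print(transcriber(audio))  # prints the transcribed text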
#!/usr/bin/env python, coding: utf-8. Copyright 2023 The HuggingFace Inc. team.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by
applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis,
without warranties or conditions of any kind, either express or implied. See the License for the specific language
governing permissions and limitations under the License.

Example:

    from transformers.tools import TextClassificationTool

    classifier = TextClassificationTool()
    classifier("This is a super nice API!", labels=["positive", "negative"])
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
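A short usage sketch (the text and candidate labels are illustrative): encode() pairs the input text with one
"This example is {label}" hypothesis per candidate label, and decode() returns the label whose pair scores highest
on the entailment logit.

classifier = TextClassificationTool()
best_label = classifier("I really enjoyed this film", labels=["positive", "negative", "neutral"])
print(best_label)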
#!/usr/bin/env python, coding: utf-8. Copyright 2023 The HuggingFace Inc. team.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by
applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis,
without warranties or conditions of any kind, either express or implied. See the License for the specific language
governing permissions and limitations under the License.

Example:

    from transformers.tools import TextSummarizationTool

    summarizer = TextSummarizationTool()
    summarizer(long_text)
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
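Usage follows the same callable pattern; the string below is only an illustrative placeholder for a longer document.

summarizer = TextSummarizationTool()
long_text = (
    "Transformers provides thousands of pretrained models to perform tasks on different modalities "
    "such as text, vision, and audio, and makes it easy to share and fine-tune them."
)
print(summarizer(long_text))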
#!/usr/bin/env python, coding: utf-8. Copyright 2023 The HuggingFace Inc. team.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by
applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis,
without warranties or conditions of any kind, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
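A usage sketch, assuming the default checkpoints above; writing the waveform to disk with soundfile and the 16 kHz
sample rate of SpeechT5 are illustrative assumptions, not part of the class.

import soundfile as sf

reader = TextToSpeechTool()
waveform = reader("Hello, my dog is cute.")  # a 1-D torch tensor of audio samples
sf.write("speech.wav", waveform.numpy(), samplerate=16000)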
#!/usr/bin/env python, coding: utf-8. Copyright 2023 The HuggingFace Inc. team.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by
applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis,
without warranties or conditions of any kind, either express or implied. See the License for the specific language
governing permissions and limitations under the License.

Example:

    from transformers.tools import TranslationTool

    translator = TranslationTool()
    translator("This is a super nice API!", src_lang="English", tgt_lang="French")
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer from .base import PipelineTool LANGUAGE_CODES = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": 
"oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class TranslationTool(PipelineTool): default_checkpoint = "facebook/nllb-200-distilled-600M" description = ( "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should " "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, " "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in " "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`." ) name = "translator" pre_processor_class = AutoTokenizer model_class = AutoModelForSeq2SeqLM lang_to_code = LANGUAGE_CODES inputs = ["text", "text", "text"] outputs = ["text"] def encode(self, text, src_lang, tgt_lang): if src_lang not in self.lang_to_code: raise ValueError(f"{src_lang} is not a supported language.") if tgt_lang not in self.lang_to_code: raise ValueError(f"{tgt_lang} is not a supported language.") src_lang = self.lang_to_code[src_lang] tgt_lang = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang ) def forward(self, inputs): return self.model.generate(**inputs) def decode(self, outputs): return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
coding: utf-8. Copyright 2020-present the HuggingFace Inc. team.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by
applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis,
without warranties or conditions of any kind, either express or implied. See the License for the specific language
governing permissions and limitations under the License.

The Trainer class, to easily train a Transformers model from scratch or fine-tune it on a new task.

Integrations must be imported before ML frameworks (isort: off / isort: on). Name of the files used for checkpointing.

Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for Transformers.

Args:
    model (PreTrainedModel or torch.nn.Module, optional):
        The model to train, evaluate or use for predictions. If not provided, a model_init must be passed.
        Tip: Trainer is optimized to work with the PreTrainedModel provided by the library. You can still use your
        own models defined as torch.nn.Module, as long as they work the same way as the Transformers models.
    args (TrainingArguments, optional):
        The arguments to tweak for training. Will default to a basic instance of TrainingArguments with the
        output_dir set to a directory named tmp_trainer in the current directory if not provided.
    data_collator (DataCollator, optional):
        The function to use to form a batch from a list of elements of train_dataset or eval_dataset. Will default
        to default_data_collator if no tokenizer is provided, an instance of DataCollatorWithPadding otherwise.
    train_dataset (torch.utils.data.Dataset or torch.utils.data.IterableDataset, optional):
        The dataset to use for training. If it is a datasets.Dataset, columns not accepted by the model.forward()
        method are automatically removed. Note that if it's a torch.utils.data.IterableDataset with some
        randomization and you are training in a distributed fashion, your iterable dataset should either use an
        internal attribute generator that is a torch.Generator for the randomization (that must be identical on all
        processes, and the Trainer will manually set the seed of this generator at each epoch), or have a
        set_epoch() method that internally sets the seed of the RNGs used.
    eval_dataset (Union[torch.utils.data.Dataset, Dict[str, torch.utils.data.Dataset]], optional):
        The dataset to use for evaluation. If it is a datasets.Dataset, columns not accepted by the model.forward()
        method are automatically removed. If it is a dictionary, it will evaluate on each dataset, prepending the
        dictionary key to the metric name.
    tokenizer (PreTrainedTokenizerBase, optional):
        The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
        maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
        interrupted training or reuse the fine-tuned model.
    model_init (Callable[[], PreTrainedModel], optional):
        A function that instantiates the model to be used. If provided, each call to Trainer.train() will start from
        a new instance of the model as given by this function. The function may have zero argument, or a single one
        containing the optuna/Ray Tune/SigOpt trial object, to be able to choose different architectures according
        to hyper parameters (such as layer count, sizes of inner layers, dropout probabilities etc.).
    compute_metrics (Callable[[EvalPrediction], Dict], optional):
        The function that will be used to compute metrics at evaluation. Must take an EvalPrediction and return a
        dictionary string to metric values.
    callbacks (List of TrainerCallback, optional):
        A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed
        in the callback documentation. If you want to remove one of the default callbacks used, use the
        Trainer.remove_callback() method.
    optimizers (Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR], optional, defaults to (None, None)):
        A tuple containing the optimizer and the scheduler to use. Will default to an instance of AdamW on your
        model and a scheduler given by get_linear_schedule_with_warmup() controlled by args.
    preprocess_logits_for_metrics (Callable[[torch.Tensor, torch.Tensor], torch.Tensor], optional):
        A function that preprocess the logits right before caching them at each evaluation step. Must take two
        tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
        by this function will be reflected in the predictions received by compute_metrics. Note that the labels
        (second parameter) will be None if the dataset does not have them.

Important attributes:
    - model -- Always points to the core model. If using a transformers model, it will be a PreTrainedModel subclass.
    - model_wrapped -- Always points to the most external model in case one or more other modules wrap the original
      model. This is the model that should be used for the forward pass. For example, under DeepSpeed the inner
      model is wrapped in DeepSpeed and then again in torch.nn.DistributedDataParallel. If the inner model hasn't
      been wrapped, then self.model_wrapped is the same as self.model.
    - is_model_parallel -- Whether or not a model has been switched to a model parallel mode (different from data
      parallelism, this means some of the model layers are split on different GPUs).
    - place_model_on_device -- Whether or not to automatically place the model on the device - it will be set to
      False if model parallel or DeepSpeed is used, or if the default TrainingArguments.place_model_on_device is
      overridden to return False.
    - is_in_train -- Whether or not a model is currently running train (e.g. when evaluate is called while in train).

Inline notes from the constructor, in order: those are used as methods of the Trainer in examples; seed must be set
before instantiating the model when using model; memory metrics - must set up as early as possible; set the correct
log level depending on the node; force device and distributed setup init explicitly; warn users; at this stage the
model is already loaded; one place to sort out whether to place the model on device or not; postpone switching model
to cuda when (1) MP - since we are trying to fit a much bigger than 1 GPU model, (2) fp16-enabled DeepSpeed loads the
model in half the size and it doesn't need .to() anyway, and we only use DeepSpeed for training at the moment,
(3) full bf16 or fp16 eval - since the model needs to be cast to the right dtype first, (4) FSDP - same as MP; bnb
quantized models don't support the .to() operation; force n_gpu to 1 to avoid DataParallel, as MP will manage the
GPUs; later use self.model is self.model_wrapped to check if it's wrapped or not; will be set to True by
self._setup_loggers() on first call to self.log(); create distant repo and output directory if needed; mixed
precision setup; mixed precision setup for SageMaker Model Parallel: bf16 model parallelism in SageMaker is currently
not supported, raise an error when there's a mismatch between SMP config and trainer argument (use SMP config as
truth); SMP < 1.10 does not support fp16 in trainer; DeepSpeed and SageMaker Model Parallel manage their own half
precision; label smoothing; internal variable to count flos in each process, will be accumulated in
self.state.total_flos then returned to 0 every
time flos need to be logged internal variables to help with automatic batch size reduction very last torch compile unwrappedmodel unwrapmodelmodel if ispeftavailable and isinstanceunwrappedmodel peftmodel embeddings unwrappedmodel basemodel model getinputembeddings else embeddings unwrappedmodel getinputembeddings del unwrappedmodel embeddings neftunenoisealpha self neftunenoisealpha hookhandle embeddings registerforwardhookneftunepostforwardhook self neftunehookhandle hookhandle return model def deactivateneftuneself model if not hasattrself neftunehookhandle raise valueerrorneftune is not activated make sure to call trainer activateneftune first unwrappedmodel unwrapmodelmodel if ispeftavailable and isinstanceunwrappedmodel peftmodel embeddings unwrappedmodel basemodel model getinputembeddings else embeddings unwrappedmodel getinputembeddings self neftunehookhandle remove del embeddings neftunenoisealpha unwrappedmodel def addcallbackself callback self callbackhandler addcallbackcallback def popcallbackself callback return self callbackhandler popcallbackcallback def removecallbackself callback self callbackhandler removecallbackcallback def movemodeltodeviceself model device model model todevice moving a model to an xla device disconnects the tied weights so we have to retie them if self args parallelmode parallelmode tpu and hasattrmodel tieweights model tieweights def setsignaturecolumnsifneededself if self signaturecolumns is none inspect model forward signature to keep only the arguments it accepts signature inspect signatureself model forward self signaturecolumns listsignature parameters keys labels may be named label or labelids the default data collator handles that self signaturecolumns listsetlabel labelids self labelnames def removeunusedcolumnsself dataset datasets dataset description optionalstr none if not self args removeunusedcolumns return dataset self setsignaturecolumnsifneeded signaturecolumns self signaturecolumns ignoredcolumns listsetdataset columnnames setsignaturecolumns if lenignoredcolumns 0 dsetdescription if description is none else fin the description set logger info fthe following columns dsetdescription don t have a corresponding argument in fself model class name forward and have been ignored joinignoredcolumns f if joinignoredcolumns are not expected by self model class name forward you can safely ignore this message columns k for k in signaturecolumns if k in dataset columnnames if version parsedatasets version version parse1 4 0 dataset setformat typedataset formattype columnscolumns formatkwargsdataset formatformatkwargs return dataset else return dataset removecolumnsignoredcolumns def getcollatorwithremovedcolumns self datacollator callable description optionalstr none callable build the sampler returns the training torch utils data dataloader will use no sampler if traindataset does not implement len a random sampler adapted to distributed training if necessary otherwise subclass and override this method if you want to inject some custom behavior deprecated code returns the evaluation torch utils data dataloader subclass and override this method if you want to inject some custom behavior args evaldataset torch utils data dataset optional if provided will override self evaldataset if it is a datasets dataset columns not accepted by the model forward method are automatically removed it must implement len returns the test torch utils data dataloader subclass and override this method if you want to inject some custom behavior args testdataset torch 
utils data dataset optional the test dataset to use if it is a datasets dataset columns not accepted by the model forward method are automatically removed it must implement len we use the same batchsize as for eval setup the optimizer and the learning rate scheduler we provide a reasonable default that works well if you want to use something else you can pass a tuple in the trainer s init through optimizers or subclass and override this method or createoptimizer andor createscheduler in a subclass if smp 1 10 and fp16 is enabled we unwrap the optimizer get all parameter names that weight decay will be applied to note that some models implement their own layernorm instead of calling nn layernorm weight decay could still apply to those modules since this function only filter out instance of nn layernorm setup the optimizer we provide a reasonable default that works well if you want to use something else you can pass a tuple in the trainer s init through optimizers or subclass and override this method in a subclass returns the optimizer class and optimizer parameters based on the training arguments args args transformers trainingargs trainingarguments the training arguments for the training session parse args optimargs todo change dtypes back to mfp32 var bf16 kahan false once they can be cast together in torchdistx setup the scheduler the optimizer of the trainer must have been set up either before this method is called or passed as an argument args numtrainingsteps int the number of training steps to do helper to get number of samples in a torch utils data dataloader by accessing its dataset when dataloader dataset does not exist or has no length estimates as best it can special case for iterabledatasetshard we need to dig deeper helper to get number of tokens in a torch utils data dataloader by enumerating dataloader hp search setup code self trial trial if self hpsearchbackend is none or trial is none return if self hpsearchbackend hpsearchbackend optuna params self hpspacetrial elif self hpsearchbackend hpsearchbackend ray params trial params popwandb none elif self hpsearchbackend hpsearchbackend sigopt params k intv if isinstancev str else v for k v in trial assignments items elif self hpsearchbackend hpsearchbackend wandb params trial for key value in params items if not hasattrself args key logger warning ftrying to set key in the hyperparameter search but there is no corresponding field in trainingarguments continue oldattr getattrself args key none casting value to the proper type if oldattr is not none value typeoldattrvalue setattrself args key value if self hpsearchbackend hpsearchbackend optuna logger infoftrial trial params if self hpsearchbackend hpsearchbackend sigopt logger infofsigopt assignments trial assignments if self hpsearchbackend hpsearchbackend wandb logger infofwb sweep parameters trial if self isdeepspeedenabled if self args deepspeed is none raise valueerrorfor sweeps with deepspeed args deepspeed must be set rebuild the deepspeed config to reflect the updated training parameters from accelerate utils import deepspeedplugin from transformers integrations deepspeed import hftrainerdeepspeedconfig self args hfdeepspeedconfig hftrainerdeepspeedconfigself args deepspeed self args hfdeepspeedconfig trainerconfigprocessself args self args deepspeedplugin deepspeedpluginhfdsconfigself args hfdeepspeedconfig self createacceleratorandpostprocess def reporttohpsearchself trial unionoptuna trial dictstr any step int metrics dictstr float if self hpsearchbackend is none or 
trial is none return self objective self computeobjectivemetrics copy if self hpsearchbackend hpsearchbackend optuna import optuna if not trial study ismultiobjective trial reportself objective step if trial shouldprune self callbackhandler ontrainendself args self state self control raise optuna trialpruned elif self hpsearchbackend hpsearchbackend ray from ray import tune if self control shouldsave self tunesavecheckpoint tune reportobjectiveself objective metrics def tunesavecheckpointself from ray import tune if not self usetunecheckpoints return with tune checkpointdirstepself state globalstep as checkpointdir outputdir os path joincheckpointdir fprefixcheckpointdirself state globalstep self savemodeloutputdir internalcalltrue if self args shouldsave self state savetojsonos path joinoutputdir trainerstatename torch saveself optimizer statedict os path joinoutputdir optimizername torch saveself lrscheduler statedict os path joinoutputdir schedulername def callmodelinitself trialnone modelinitargcount numberofargumentsself modelinit if modelinitargcount 0 model self modelinit elif modelinitargcount 1 model self modelinittrial else raise runtimeerrormodelinit should have 0 or 1 argument if model is none raise runtimeerrormodelinit should not return none return model def torchjitmodelevalself model dataloader trainingfalse if not training if dataloader is none logger warningfailed to use pytorch jit mode due to current dataloader is none return model examplebatch nextiterdataloader examplebatch self prepareinputsexamplebatch try jitmodel copy copymodel jitmodel eval originalforward jitmodel dict poporiginalforward none remove mixed precision hooks from the model if originalforward jitmodel forward originalforward with self accelerator autocastcacheenabledfalse torch nograd if version parseversion parsetorch version baseversion version parse2 0 0 if isinstanceexamplebatch dict jitmodel torch jit tracejitmodel examplekwarginputsexamplebatch strictfalse else jitmodel torch jit trace jitmodel examplekwarginputskey examplebatchkey for key in examplebatch strictfalse else jitinputs for key in examplebatch exampletensor torch oneslikeexamplebatchkey jitinputs appendexampletensor jitinputs tuplejitinputs jitmodel torch jit tracejitmodel jitinputs strictfalse jitmodel torch jit freezejitmodel with torch nograd jitmodelexamplebatch jitmodelexamplebatch model jitmodel self usecpuamp false except runtimeerror typeerror valueerror nameerror indexerror as e logger warningffailed to use pytorch jit mode due to e return model def ipexoptimizemodelself model trainingfalse dtypetorch float32 if not isipexavailable raise importerror using ipex but ipex is not installed or ipex s version does not match current pytorch please refer to https github comintelintelextensionforpytorch import intelextensionforpytorch as ipex if not training model eval dtype torch bfloat16 if not self isintrain and self args bf16fulleval else dtype convbnfolding is disabled as it fails in symbolic tracing resulting in ipex warnings model ipex optimizemodel dtypedtype levelo1 convbnfoldingfalse inplacenot self isintrain else if not model training model train model self optimizer ipex optimize model dtypedtype optimizerself optimizer inplacetrue levelo1 return model def wrapmodelself model trainingtrue dataloadernone if self args useipex dtype torch bfloat16 if self usecpuamp else torch float32 model self ipexoptimizemodelmodel training dtypedtype if issagemakermpenabled wrapping the base model twice in a distributedmodel will raise 
an error if isinstanceself modelwrapped smp model distributedmodel return self modelwrapped return smp distributedmodelmodel backwardpassesperstepself args gradientaccumulationsteps traineval could be run multipletimes if already wrapped don t rewrap it again if unwrapmodelmodel is not model return model mixed precision training with apex torch 1 6 if self useapex and training model self optimizer amp initializemodel self optimizer optlevelself args fp16optlevel multigpu training should be after apex fp16 initialization 8bit models does not support ddp if self args ngpu 1 and not getattrmodel isloadedin8bit false model nn dataparallelmodel if self args jitmodeeval starttime time time model self torchjitmodelevalmodel dataloader training self jitcompilationtime roundtime time starttime 4 note in torch distributed mode there s no point in wrapping the model inside a distributeddataparallel as we ll be under nograd anyways if not training return model distributed training should be after apex fp16 initialization distributed training using pytorch fsdp if self isfsdpxlaenabled try from torchxla distributed fsdp import xlafullyshardeddataparallel as fsdp from torchxla distributed fsdp import checkpointmodule from torchxla distributed fsdp wrap import sizebasedautowrappolicy transformerautowrappolicy except importerror raise importerrormissing xla fsdp related module please make sure to use torchxla 2 0 autowrappolicy none autowrappercallable none defaulttransformerclsnamestowrap getattrmodel nosplitmodules none fsdptransformerlayerclstowrap self args fsdpconfig get transformerlayerclstowrap defaulttransformerclsnamestowrap if self args fsdpconfigminnumparams 0 autowrappolicy functools partial sizebasedautowrappolicy minnumparamsself args fsdpconfigminnumparams elif fsdptransformerlayerclstowrap is not none transformerclstowrap set for layerclass in fsdptransformerlayerclstowrap transformercls getmoduleclassfromnamemodel layerclass if transformercls is none raise exceptioncould not find the transformer layer class to wrap in the model else transformerclstowrap addtransformercls autowrappolicy functools partial transformerautowrappolicy transformer layer class to wrap transformerlayerclstransformerclstowrap fsdpkwargs self args xlafsdpconfig if self args fsdpconfigxlafsdpgradckpt apply gradient checkpointing to autowrapped submodules if specified def autowrappercallablem args kwargs return fsdpcheckpointmodulem args kwargs wrap the base model with an outer fsdp wrapper self model model fsdp model autowrappolicyautowrappolicy autowrappercallableautowrappercallable fsdpkwargs patch xm optimizerstep should not reduce gradients in this case as fsdp does not need gradient reduction over sharded parameters def patchedoptimizerstepoptimizer barrierfalse optimizerargs loss optimizer stepoptimizerargs if barrier xm markstep return loss xm optimizerstep patchedoptimizerstep elif issagemakerdpenabled model nn parallel distributeddataparallel model deviceidsintos getenvsmdataparallellocalrank elif self args parallelmode parallelmode distributed if istorchneuroncoreavailable return model kwargs if self args ddpfindunusedparameters is not none kwargsfindunusedparameters self args ddpfindunusedparameters elif isinstancemodel pretrainedmodel findunusedparameters breaks checkpointing as per https github comhuggingfacetransformerspull4659issuecomment643356021 kwargsfindunusedparameters not model isgradientcheckpointing else kwargsfindunusedparameters true if self args ddpbucketcapmb is not none kwargsbucketcapmb 
self args ddpbucketcapmb if self args ddpbroadcastbuffers is not none kwargsbroadcastbuffers self args ddpbroadcastbuffers self accelerator ddphandler distributeddataparallelkwargskwargs return model def train self resumefromcheckpoint optionalunionstr bool none trial unionoptuna trial dictstr any none ignorekeysforeval optionalliststr none kwargs if resumefromcheckpoint is false resumefromcheckpoint none memory metrics must set up as early as possible self memorytracker start args self args self isintrain true attach neftune hooks if necessary if self neftunenoisealpha is not none self model self activateneftuneself model dotrain is not a reliable argument as it might not be set and train still called so the following is a workaround if args fp16fulleval or args bf16fulleval and not args dotrain self movemodeltodeviceself model args device if modelpath in kwargs resumefromcheckpoint kwargs popmodelpath warnings warn modelpath is deprecated and will be removed in a future version use resumefromcheckpoint instead futurewarning if lenkwargs 0 raise typeerrorftrain received got unexpected keyword arguments joinlistkwargs keys this might change the seed so needs to run first self hpsearchsetuptrial self trainbatchsize self args trainbatchsize model reinit modelreloaded false if self modelinit is not none seed must be set before instantiating the model when using modelinit enablefulldeterminismself args seed if self args fulldeterminism else setseedself args seed self model self callmodelinittrial modelreloaded true reinitializes optimizer and scheduler self optimizer self lrscheduler none none load potential model checkpoint if isinstanceresumefromcheckpoint bool and resumefromcheckpoint resumefromcheckpoint getlastcheckpointargs outputdir if resumefromcheckpoint is none raise valueerrorfno valid checkpoint found in output directory args outputdir if resumefromcheckpoint is not none and not issagemakermpenabled and not self isdeepspeedenabled and not self isfsdpenabled self loadfromcheckpointresumefromcheckpoint if model was reinitialized put it on the right device and update self modelwrapped if modelreloaded if self placemodelondevice self movemodeltodeviceself model args device self modelwrapped self model innertrainingloop findexecutablebatchsize self innertrainingloop self trainbatchsize args autofindbatchsize if args pushtohub try disable progress bars when uploading models during checkpoints to avoid polluting stdout hfhubutils disableprogressbars return innertrainingloop argsargs resumefromcheckpointresumefromcheckpoint trialtrial ignorekeysforevalignorekeysforeval finally hfhubutils enableprogressbars else return innertrainingloop argsargs resumefromcheckpointresumefromcheckpoint trialtrial ignorekeysforevalignorekeysforeval def innertrainingloop self batchsizenone argsnone resumefromcheckpointnone trialnone ignorekeysforevalnone self accelerator freememory self trainbatchsize batchsize logger debugfcurrently training with a batch size of self trainbatchsize data loader and number of training steps traindataloader self gettraindataloader setting up training control variables number of training epochs numtrainepochs number of training steps per epoch numupdatestepsperepoch total number of training steps to execute maxsteps totaltrainbatchsize self trainbatchsize args gradientaccumulationsteps args worldsize lendataloader none numtraintokens none if haslengthtraindataloader lendataloader lentraindataloader numupdatestepsperepoch lendataloader args gradientaccumulationsteps 
numupdatestepsperepoch maxnumupdatestepsperepoch 1 numexamples self numexamplestraindataloader if args maxsteps 0 maxsteps args maxsteps numtrainepochs args maxsteps numupdatestepsperepoch int args maxsteps numupdatestepsperepoch 0 may be slightly incorrect if the last batch in the training dataloader has a smaller size but it s the best we can do numtrainsamples args maxsteps totaltrainbatchsize if args includetokenspersecond numtraintokens self numtokenstraindataloader args maxsteps args gradientaccumulationsteps else maxsteps math ceilargs numtrainepochs numupdatestepsperepoch numtrainepochs math ceilargs numtrainepochs numtrainsamples self numexamplestraindataloader args numtrainepochs if args includetokenspersecond numtraintokens self numtokenstraindataloader args numtrainepochs elif args maxsteps 0 rely on maxsteps when dataloader does not have a working size maxsteps args maxsteps setting a very large number of epochs so we go as many times as necessary over the iterator numtrainepochs sys maxsize numupdatestepsperepoch maxsteps numexamples totaltrainbatchsize args maxsteps numtrainsamples args maxsteps totaltrainbatchsize if args includetokenspersecond numtraintokens self numtokenstraindataloader args maxsteps args gradientaccumulationsteps else raise valueerror args maxsteps must be set to a positive value if dataloader does not have a length was f args maxsteps if debugoption underflowoverflow in self args debug if self args ngpu 1 nn dataparallelmodel replicates the model creating new variables and module references registered here no longer work on other gpus breaking the module raise valueerror currently debug underflowoverflow is not supported under dp please use ddp torchrun or torch distributed launch deprecated else debugoverflow debugunderflowoverflowself model noqa delayoptimizercreation issagemakermpenabled or self isfsdpxlaenabled or self isfsdpenabled we need to reset the scheduler as its parameters may be different on subsequent calls if self createdlrscheduler self lrscheduler none self createdlrscheduler false if self isdeepspeedenabled self optimizer self lrscheduler deepspeedinitself numtrainingstepsmaxsteps if not delayoptimizercreation self createoptimizerandschedulernumtrainingstepsmaxsteps self state trainerstate self state ishyperparamsearch trial is not none compute absolute values for logging eval and save if given as ratio if args loggingsteps is not none if args loggingsteps 1 self state loggingsteps math ceilmaxsteps args loggingsteps else self state loggingsteps args loggingsteps if args evalsteps is not none if args evalsteps 1 self state evalsteps math ceilmaxsteps args evalsteps else self state evalsteps args evalsteps if args savesteps is not none if args savesteps 1 self state savesteps math ceilmaxsteps args savesteps else self state savesteps args savesteps activate gradient checkpointing if needed if args gradientcheckpointing if args gradientcheckpointingkwargs is none gradientcheckpointingkwargs else gradientcheckpointingkwargs args gradientcheckpointingkwargs self model gradientcheckpointingenablegradientcheckpointingkwargsgradientcheckpointingkwargs model self wrapmodelself modelwrapped as the model is wrapped don t use accelerator prepare this is for unhandled cases such as fsdpxla sagemaker mpdp dataparallel ipex useacceleratorprepare true if model is self model else false if delayoptimizercreation self createoptimizerandschedulernumtrainingstepsmaxsteps prepare using accelerator prepare if useacceleratorprepare self model train if 
hasattrself lrscheduler step if self useapex model self accelerator prepareself model else model self optimizer self accelerator prepareself model self optimizer else to handle cases wherein we pass dummyscheduler such as when it is specified in deepspeed config model self optimizer self lrscheduler self accelerator prepare self model self optimizer self lrscheduler if self isfsdpenabled self model self modelwrapped model for the rest of this function model is the outside model whether it was wrapped or not if model is not self model self modelwrapped model backward compatibility if self isdeepspeedenabled self deepspeed self modelwrapped ckpt loading if resumefromcheckpoint is not none if self isdeepspeedenabled deepspeedloadcheckpointself modelwrapped resumefromcheckpoint elif issagemakermpenabled or self isfsdpenabled self loadfromcheckpointresumefromcheckpoint self modelwrapped check if saved optimizer or scheduler states exist self loadoptimizerandschedulerresumefromcheckpoint important at this point self model is the transformers model self modelwrapped is ddptransformers model deepspeedtransformers model fsdptransformers model dynamo optimized moduletransformers model etc train logger info running training logger infof num examples numexamples logger infof num epochs numtrainepochs logger infof instantaneous batch size per device self args perdevicetrainbatchsize if self args perdevicetrainbatchsize self trainbatchsize logger infof training with dataparallel so batch size has been adjusted to self trainbatchsize logger infof total train batch size w parallel distributed accumulation totaltrainbatchsize logger infof gradient accumulation steps args gradientaccumulationsteps logger infof total optimization steps maxsteps logger infof number of trainable parameters getmodelparamcountmodel trainableonlytrue self state epoch 0 starttime time time epochstrained 0 stepstrainedincurrentepoch 0 stepstrainedprogressbar none check if continuing training from a checkpoint if resumefromcheckpoint is not none and os path isfile os path joinresumefromcheckpoint trainerstatename self state trainerstate loadfromjsonos path joinresumefromcheckpoint trainerstatename epochstrained self state globalstep numupdatestepsperepoch if not args ignoredataskip stepstrainedincurrentepoch self state globalstep numupdatestepsperepoch stepstrainedincurrentepoch args gradientaccumulationsteps else stepstrainedincurrentepoch 0 logger info continuing training from checkpoint will skip to saved globalstep logger infof continuing training from epoch epochstrained logger infof continuing training from global step self state globalstep if not args ignoredataskip logger info f will skip the first epochstrained epochs then the first f stepstrainedincurrentepoch batches in the first epoch update the references self callbackhandler model self model self callbackhandler optimizer self optimizer self callbackhandler lrscheduler self lrscheduler self callbackhandler traindataloader traindataloader if self hpname is not none and self trial is not none use self trial because the sigoptoptuna hpo only call hpsearchsetuptrial instead of passing trial parameter to train when using ddp self state trialname self hpnameself trial if trial is not none assignments trial assignments if self hpsearchbackend hpsearchbackend sigopt else trial self state trialparams hpparamsassignments else self state trialparams none this should be the same if the state has been saved but in case the training arguments changed it s safer to set this after the 
load self state maxsteps maxsteps self state numtrainepochs numtrainepochs self state islocalprocesszero self islocalprocesszero self state isworldprocesszero self isworldprocesszero trloss is a tensor to avoid synchronization of tpus through item trloss torch tensor0 0 toargs device totallossscalar is updated everytime item has to be called on trloss and stores the sum of all losses self totallossscalar 0 0 self globalsteplastlogged self state globalstep model zerograd self control self callbackhandler ontrainbeginargs self state self control skip the first epochstrained epochs to get the random state of the dataloader at the right point if not args ignoredataskip for epoch in rangeepochstrained sampler getdataloadersamplertraindataloader samplerkinds randomsampler if version parseaccelerateversion version parse0 23 0 samplerkinds appendseedablerandomsampler israndomsampler isinstancesampler tuplesamplerkinds if istorchlessthan111 or not israndomsampler we just need to begin an iteration to create the randomization of the sampler for in traindataloader break else otherwise we need to call the whooooole sampler cause there is some random operation added at the very end sampler sampler if sampler is not none else listsampler totalbatchedsamples 0 for epoch in rangeepochstrained numtrainepochs epochiterator traindataloader if hasattrepochiterator setepoch epochiterator setepochepoch reset the past mems state at the beginning of each epoch if necessary if args pastindex 0 self past none stepsinepoch lenepochiterator if lendataloader is not none else args maxsteps args gradientaccumulationsteps self control self callbackhandler onepochbeginargs self state self control if epoch epochstrained and resumefromcheckpoint is not none and stepstrainedincurrentepoch 0 self loadrngstateresumefromcheckpoint rngtosync false stepsskipped 0 if stepstrainedincurrentepoch 0 epochiterator skipfirstbatchesepochiterator stepstrainedincurrentepoch stepsskipped stepstrainedincurrentepoch stepstrainedincurrentepoch 0 rngtosync true step 1 for step inputs in enumerateepochiterator totalbatchedsamples 1 if self args includenuminputtokensseen maininputname getattrself model maininputname inputids if maininputname not in inputs logger warning tried to track the number of tokens seen however the current model is not configured properly to know what item is the input to fix this add a maininputname attribute to the model class you are using else self state numinputtokensseen self accelerator gatherinputsmaininputname numel if rngtosync self loadrngstateresumefromcheckpoint rngtosync false skip past any already trained steps if resuming training if stepstrainedincurrentepoch 0 stepstrainedincurrentepoch 1 if stepstrainedprogressbar is not none stepstrainedprogressbar update1 if stepstrainedincurrentepoch 0 self loadrngstateresumefromcheckpoint continue elif stepstrainedprogressbar is not none stepstrainedprogressbar close stepstrainedprogressbar none if step args gradientaccumulationsteps 0 self control self callbackhandler onstepbeginargs self state self control with self accelerator accumulatemodel trlossstep self trainingstepmodel inputs if args loggingnaninffilter and not istorchtpuavailable and torch isnantrlossstep or torch isinftrlossstep if loss is nan or inf simply add the average of previous logged losses trloss trloss 1 self state globalstep self globalsteplastlogged else trloss trlossstep self currentflos floatself floatingpointopsinputs islaststepandstepslessthangradacc stepsinepoch args 
gradientaccumulationsteps and step 1 stepsinepoch if totalbatchedsamples args gradientaccumulationsteps 0 or last step in epoch but step is always smaller than gradientaccumulationsteps islaststepandstepslessthangradacc the or condition of islaststepandstepslessthangradacc is not covered in accelerate so explicitly enable sync gradients to true in that case if islaststepandstepslessthangradacc self accelerator gradientstate setsyncgradientstrue gradient clipping if args maxgradnorm is not none and args maxgradnorm 0 deepspeed does its own clipping if issagemakermpenabled and args fp16 self optimizer clipmastergradsargs maxgradnorm elif self useapex revert to normal clipping otherwise handling apex or full precision nn utils clipgradnorm amp masterparamsself optimizer args maxgradnorm else self accelerator clipgradnorm model parameters args maxgradnorm optimizer step self optimizer step optimizerwasrun not self accelerator optimizerstepwasskipped if optimizerwasrun delay optimizer scheduling until metrics are generated if not isinstanceself lrscheduler torch optim lrscheduler reducelronplateau self lrscheduler step model zerograd self state globalstep 1 self state epoch epoch step 1 stepsskipped stepsinepoch self control self callbackhandler onstependargs self state self control self maybelogsaveevaluatetrloss model trial epoch ignorekeysforeval else self control self callbackhandler onsubstependargs self state self control if self control shouldepochstop or self control shouldtrainingstop break if step 0 logger warning there seems to be not a single sample in your epochiterator stopping training at step f self state globalstep this is expected if you re using an iterabledataset and set f numsteps maxsteps higher than the number of available samples self control shouldtrainingstop true self control self callbackhandler onepochendargs self state self control self maybelogsaveevaluatetrloss model trial epoch ignorekeysforeval if debugoption tpumetricsdebug in self args debug if istorchtpuavailable tpucomment logging debug metrics for pytorchxla compile execute times ops etc xm masterprintmet metricsreport else logger warning you enabled pytorchxla debug metrics but you don t have a tpu configured check your training configuration if this is unexpected if self control shouldtrainingstop break if args pastindex and hasattrself past clean the state at the end of training delattrself past logger infonntraining completed do not forget to share your model on huggingface comodels nn if args loadbestmodelatend and self state bestmodelcheckpoint is not none wait for everyone to get here so we are sure the model has been saved by process 0 if istorchtpuavailable xm rendezvousloadbestmodelatend elif args parallelmode parallelmode distributed dist barrier elif issagemakermpenabled smp barrier self loadbestmodel add remaining trloss self totallossscalar trloss item trainloss self totallossscalar self state globalstep metrics speedmetrics train starttime numsamplesnumtrainsamples numstepsself state maxsteps numtokensnumtraintokens self storeflos metricstotalflos self state totalflos metricstrainloss trainloss self isintrain false self memorytracker stopandupdatemetricsmetrics self logmetrics rundir self getoutputdirtrial checkpointssorted self sortedcheckpointsusemtimefalse outputdirrundir delete the last checkpoint when savetotallimit1 if it s different from the best checkpoint and process allowed to save if self args shouldsave and self state bestmodelcheckpoint is not none and self args savetotallimit 1 
for checkpoint in checkpointssorted if not os path samefilecheckpoint self state bestmodelcheckpoint logger infofdeleting older checkpoint checkpoint due to args savetotallimit shutil rmtreecheckpoint self control self callbackhandler ontrainendargs self state self control wait for the checkpoint to be uploaded self finishcurrentpush after training we make sure to retrieve back the original forward pass method for the embedding layer by removing the forward post hook if self neftunenoisealpha is not none self deactivateneftuneself model return trainoutputself state globalstep trainloss metrics def getoutputdirself trial if self hpsearchbackend is not none and trial is not none if self hpsearchbackend hpsearchbackend optuna runid trial number elif self hpsearchbackend hpsearchbackend ray from ray import tune runid tune gettrialid elif self hpsearchbackend hpsearchbackend sigopt runid trial id elif self hpsearchbackend hpsearchbackend wandb import wandb runid wandb run id runname self hpnametrial if self hpname is not none else frunrunid rundir os path joinself args outputdir runname else rundir self args outputdir return rundir def loadfromcheckpointself resumefromcheckpoint modelnone if model is none model self model configfile os path joinresumefromcheckpoint configname adapterweightsfile os path joinresumefromcheckpoint adapterweightsname adaptersafeweightsfile os path joinresumefromcheckpoint adaptersafeweightsname weightsfile os path joinresumefromcheckpoint weightsname weightsindexfile os path joinresumefromcheckpoint weightsindexname safeweightsfile os path joinresumefromcheckpoint safeweightsname safeweightsindexfile os path joinresumefromcheckpoint safeweightsindexname isfsdpckpt os path isdirresumefromcheckpoint and any fsdpmodelname in foldername for foldername in os listdirresumefromcheckpoint if os path isdiros path joinresumefromcheckpoint foldername if isfsdpckpt and not self isfsdpenabled raise valueerrorfcheckpoint found at resumefromcheckpoint is only supported when using pytorch fsdp if not any os path isfilef for f in weightsfile safeweightsfile weightsindexfile safeweightsindexfile adapterweightsfile adaptersafeweightsfile or isfsdpckpt raise valueerrorfcan t find a valid checkpoint at resumefromcheckpoint logger infofloading model from resumefromcheckpoint if os path isfileconfigfile config pretrainedconfig fromjsonfileconfigfile checkpointversion config transformersversion if checkpointversion is not none and checkpointversion version logger warning fyou are resuming training from a checkpoint trained with checkpointversion of ftransformers but your current version is version this is not recommended and could yield to errors or unwanted behaviors if os path isfileweightsfile or os path isfilesafeweightsfile or isfsdpckpt if the model is on the gpu it still works if issagemakermpenabled if os path isfileos path joinresumefromcheckpoint usercontent pt if the usercontent pt file exists load with the new smp api checkpoint must have been saved with the new smp api smp resumefromcheckpoint pathresumefromcheckpoint tagweightsname partialfalse loadoptimizerfalse else if the usercontent pt file does not exist load with the old smp api checkpoint must have been saved with the old smp api if hasattrself args fp16 and self args fp16 is true logger warning enabling fp16 and loading from smp 1 10 checkpoint together is not suppported statedict torch loadweightsfile maplocationcpu required for smp to not autotranslate statedict from hf to smp is already smp statedictsmpispartial 
false loadresult model loadstatedictstatedict stricttrue release memory del statedict elif self isfsdpenabled loadfsdpmodelself accelerator state fsdpplugin self accelerator model resumefromcheckpoint else we load the model state dict on the cpu to avoid an oom error if self args savesafetensors and os path isfilesafeweightsfile statedict safetensors torch loadfilesafeweightsfile devicecpu else statedict torch loadweightsfile maplocationcpu workaround for fsdp bug https github compytorchpytorchissues82963 which takes args instead of kwargs loadresult model loadstatedictstatedict false release memory del statedict self issuewarningsafterloadloadresult load adapters following pr 24096 elif ispeftavailable and isinstancemodel peftmodel if train a model using peft lora assume that adapter have been saved properly if hasattrmodel activeadapter and hasattrmodel loadadapter if os path existsresumefromcheckpoint model loadadapterresumefromcheckpoint model activeadapter istrainabletrue else logger warning the intermediate checkpoints of peft may not be saved correctly fconsider using a custom callback to save adapterweightsname in corresponding saving folders check some examples here https github comhuggingfacepeftissues96 else logger warningcould not load adapter model make sure to have peft0 3 0 installed else we load the sharded checkpoint loadresult loadshardedcheckpoint model resumefromcheckpoint strictissagemakermpenabled prefersafeself args savesafetensors if not issagemakermpenabled self issuewarningsafterloadloadresult def loadbestmodelself logger infofloading best model from self state bestmodelcheckpoint score self state bestmetric bestmodelpath os path joinself state bestmodelcheckpoint weightsname bestsafemodelpath os path joinself state bestmodelcheckpoint safeweightsname bestadaptermodelpath os path joinself state bestmodelcheckpoint adapterweightsname bestsafeadaptermodelpath os path joinself state bestmodelcheckpoint adaptersafeweightsname model self modelwrapped if issagemakermpenabled else self model if self isdeepspeedenabled deepspeedloadcheckpointself modelwrapped self state bestmodelcheckpoint elif self isfsdpenabled loadresult loadfsdpmodel self accelerator state fsdpplugin self accelerator model self state bestmodelcheckpoint elif os path existsbestmodelpath or os path existsbestsafemodelpath or os path existsbestadaptermodelpath or os path existsbestsafeadaptermodelpath hasbeenloaded true if issagemakermpenabled if os path isfileos path joinself state bestmodelcheckpoint usercontent pt if the usercontent pt file exists load with the new smp api checkpoint must have been saved with the new smp api smp resumefromcheckpoint pathself state bestmodelcheckpoint tagweightsname partialfalse loadoptimizerfalse else if the usercontent pt file does not exist load with the old smp api checkpoint must have been saved with the old smp api if self args savesafetensors and os path isfilebestsafemodelpath statedict safetensors torch loadfilebestsafemodelpath devicecpu else statedict torch loadbestmodelpath maplocationcpu statedictsmpispartial false loadresult model loadstatedictstatedict stricttrue else if ispeftavailable and isinstancemodel peftmodel if train a model using peft lora assume that adapter have been saved properly if hasattrmodel activeadapter and hasattrmodel loadadapter if os path existsbestadaptermodelpath or os path existsbestsafeadaptermodelpath model loadadapterself state bestmodelcheckpoint model activeadapter loadadapter has no return value present modify it when 
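The checkpoint-loading branch above reads the state dict on CPU to avoid an OOM error, preferring safetensors when available. A rough sketch under the assumption of the standard single-file checkpoint names (model.safetensors / pytorch_model.bin); the helper name is made up:

```python
import os
import torch
import safetensors.torch

# Hedged sketch: load checkpoint weights on CPU, preferring the safetensors file.
def load_checkpoint_state_dict(checkpoint_dir: str) -> dict:
    safe_path = os.path.join(checkpoint_dir, "model.safetensors")
    bin_path = os.path.join(checkpoint_dir, "pytorch_model.bin")
    if os.path.isfile(safe_path):
        return safetensors.torch.load_file(safe_path, device="cpu")
    return torch.load(bin_path, map_location="cpu")
```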
appropriate from torch nn modules module import incompatiblekeys loadresult incompatiblekeys else logger warning the intermediate checkpoints of peft may not be saved correctly fconsider using a custom callback to save adapterweightsname in corresponding saving folders check some examples here https github comhuggingfacepeftissues96 hasbeenloaded false else logger warningcould not load adapter model make sure to have peft0 3 0 installed hasbeenloaded false else we load the model state dict on the cpu to avoid an oom error if self args savesafetensors and os path isfilebestsafemodelpath statedict safetensors torch loadfilebestsafemodelpath devicecpu else statedict torch loadbestmodelpath maplocationcpu if the model is on the gpu it still works workaround for fsdp bug https github compytorchpytorchissues82963 which takes args instead of kwargs loadresult model loadstatedictstatedict false if not issagemakermpenabled and hasbeenloaded self issuewarningsafterloadloadresult elif os path existsos path joinself state bestmodelcheckpoint weightsindexname loadresult loadshardedcheckpoint model self state bestmodelcheckpoint strictissagemakermpenabled if not issagemakermpenabled self issuewarningsafterloadloadresult else logger warning fcould not locate the best model at bestmodelpath if you are running a distributed training on multiple nodes you should activate saveoneachnode def issuewarningsafterloadself loadresult if lenloadresult missingkeys 0 if self model keystoignoreonsave is not none and setloadresult missingkeys set self model keystoignoreonsave self model tieweights else logger warningfthere were missing keys in the checkpoint model loaded loadresult missingkeys if lenloadresult unexpectedkeys 0 logger warning fthere were unexpected keys in the checkpoint model loaded loadresult unexpectedkeys def maybelogsaveevaluateself trloss model trial epoch ignorekeysforeval if self control shouldlog if istorchtpuavailable xm markstep logs dictstr float allgather mean to get average loss over all processes trlossscalar self nestedgathertrloss mean item reset trloss to zero trloss trloss logsloss roundtrlossscalar self state globalstep self globalsteplastlogged 4 logslearningrate self getlearningrate self totallossscalar trlossscalar self globalsteplastlogged self state globalstep self storeflos self loglogs metrics none if self control shouldevaluate if isinstanceself evaldataset dict metrics for evaldatasetname evaldataset in self evaldataset items datasetmetrics self evaluate evaldatasetevaldataset ignorekeysignorekeysforeval metrickeyprefixfevalevaldatasetname metrics updatedatasetmetrics else metrics self evaluateignorekeysignorekeysforeval self reporttohpsearchtrial self state globalstep metrics run delayed lr scheduler now that metrics are populated if isinstanceself lrscheduler torch optim lrscheduler reducelronplateau metrictocheck self args metricforbestmodel if not metrictocheck startswitheval metrictocheck fevalmetrictocheck self lrscheduler stepmetricsmetrictocheck if self control shouldsave self savecheckpointmodel trial metricsmetrics self control self callbackhandler onsaveself args self state self control def loadrngstateself checkpoint load rng states from checkpoint if checkpoint is none return if self args worldsize 1 processindex self args processindex rngfile os path joincheckpoint frngstateprocessindex pth if not os path isfilerngfile logger info fdidn t find an rng file for process processindex if you are resuming a training that wasn t launched in a distributed fashion 
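The logging step above averages the accumulated loss over the steps elapsed since the last log and rounds it to four decimals before resetting the accumulator. A tiny illustrative sketch (all names hypothetical):

```python
# Hedged sketch of the logging arithmetic: "loss" is the mean training loss per step
# since the previous log entry.
def build_log_entry(tr_loss_scalar: float, global_step: int, last_logged_step: int, lr: float) -> dict:
    steps = max(global_step - last_logged_step, 1)
    return {"loss": round(tr_loss_scalar / steps, 4), "learning_rate": lr}
```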
reproducibility is not guaranteed return else rngfile os path joincheckpoint rngstate pth if not os path isfilerngfile logger info didn t find an rng file if you are resuming a training that was launched in a distributed fashion reproducibility is not guaranteed return checkpointrngstate torch loadrngfile random setstatecheckpointrngstatepython np random setstatecheckpointrngstatenumpy torch random setrngstatecheckpointrngstatecpu if torch cuda isavailable if self args parallelmode parallelmode distributed torch cuda random setrngstateallcheckpointrngstatecuda else try torch cuda random setrngstatecheckpointrngstatecuda except exception as e logger info fdidn t manage to set back the rng states of the gpu because of the following error n e nthis won t yield the same results as if the training had not been interrupted if istorchtpuavailable xm setrngstatecheckpointrngstatexla if istorchnpuavailable if self args parallelmode parallelmode distributed torch npu random setrngstateallcheckpointrngstatenpu else try torch npu random setrngstatecheckpointrngstatenpu except exception as e logger info fdidn t manage to set back the rng states of the npu because of the following error n e nthis won t yield the same results as if the training had not been interrupted def savecheckpointself model trial metricsnone in all cases including ddpdpdeepspeed self model is always a reference to the model we want to save except fullyshardedddp assert unwrapmodelmodel is self model internal model should be a reference to self model save model checkpoint checkpointfolder fprefixcheckpointdirself state globalstep if self hpsearchbackend is none and trial is none self storeflos rundir self getoutputdirtrialtrial outputdir os path joinrundir checkpointfolder self savemodeloutputdir internalcalltrue if not self args saveonlymodel save optimizer and scheduler self saveoptimizerandscheduleroutputdir save rng state self saverngstateoutputdir determine the new best metric best model checkpoint if metrics is not none and self args metricforbestmodel is not none metrictocheck self args metricforbestmodel if not metrictocheck startswitheval metrictocheck fevalmetrictocheck metricvalue metricsmetrictocheck operator np greater if self args greaterisbetter else np less if self state bestmetric is none or self state bestmodelcheckpoint is none or operatormetricvalue self state bestmetric self state bestmetric metricvalue self state bestmodelcheckpoint outputdir save the trainer state if self args shouldsave self state savetojsonos path joinoutputdir trainerstatename if self args pushtohub self pushfromcheckpointoutputdir maybe delete some older checkpoints if self args shouldsave self rotatecheckpointsusemtimetrue outputdirrundir def saverngstateself outputdir save rng state in nondistributed training rngstates python random getstate numpy np random getstate cpu torch random getrngstate if torch cuda isavailable if self args parallelmode parallelmode distributed in non distributed we save the global cuda rng state will take care of dataparallel rngstatescuda torch cuda random getrngstateall else rngstatescuda torch cuda random getrngstate if istorchtpuavailable rngstatesxla xm getrngstate if istorchnpuavailable if self args parallelmode parallelmode distributed rngstatesnpu torch npu random getrngstateall else rngstatesnpu torch npu random getrngstate a process can arrive here before the process 0 has a chance to save the model in which case outputdir may not yet exist os makedirsoutputdir existoktrue if self args worldsize 1 
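The RNG checkpointing described above can be approximated, for the single-process case, by saving and restoring the python, numpy, torch CPU and CUDA generator states; a sketch with hypothetical helper names and the rng_state.pth file name taken from the text:

```python
import os
import random
import numpy as np
import torch

# Hedged sketch of single-process RNG checkpointing as described above.
def save_rng_state(output_dir: str) -> None:
    rng_states = {
        "python": random.getstate(),
        "numpy": np.random.get_state(),
        "cpu": torch.random.get_rng_state(),
    }
    if torch.cuda.is_available():
        rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
    os.makedirs(output_dir, exist_ok=True)
    torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))

def load_rng_state(checkpoint_dir: str) -> None:
    rng_states = torch.load(os.path.join(checkpoint_dir, "rng_state.pth"))
    random.setstate(rng_states["python"])
    np.random.set_state(rng_states["numpy"])
    torch.random.set_rng_state(rng_states["cpu"])
    if torch.cuda.is_available() and "cuda" in rng_states:
        torch.cuda.random.set_rng_state_all(rng_states["cuda"])
```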
torch saverngstates os path joinoutputdir rngstate pth else torch saverngstates os path joinoutputdir frngstateself args processindex pth def saveoptimizerandschedulerself outputdir if istorchtpuavailable xm rendezvoussavingoptimizerstates xm saveself optimizer statedict os path joinoutputdir optimizername with warnings catchwarningsrecordtrue as caughtwarnings xm saveself lrscheduler statedict os path joinoutputdir schedulername reissueptwarningscaughtwarnings elif issagemakermpenabled optstatedict self optimizer localstatedictgatherifshardfalse smp barrier if smp rdprank 0 or smp state cfg shardoptimizerstate smp save optstatedict os path joinoutputdir optimizername partialtrue v3smp state cfg shardoptimizerstate elif self isdeepspeedenabled under zero3 model file itself doesn t get saved since it s bogus unless deepspeed config stage3gather16bitweightsonmodelsave is true self modelwrapped savecheckpointoutputdir elif self isfsdpenabled save fsdp specific ckpt for resuming from ckpt savefsdpmodelself accelerator state fsdpplugin self accelerator self model outputdir savefsdpoptimizer self accelerator state fsdpplugin self accelerator self optimizer self model outputdir elif self args shouldsave deepspeed savecheckpoint above saves modeloptimsched torch saveself optimizer statedict os path joinoutputdir optimizername save scheduler scaler isdeepspeedcustomscheduler self isdeepspeedenabled and not isinstance self lrscheduler deepspeedschedulerwrapper if self args shouldsave and not self isdeepspeedenabled or isdeepspeedcustomscheduler and not istorchtpuavailable with warnings catchwarningsrecordtrue as caughtwarnings torch saveself lrscheduler statedict os path joinoutputdir schedulername reissueptwarningscaughtwarnings def loadoptimizerandschedulerself checkpoint deepspeed loads optimizerlrscheduler together with the model in deepspeedinit load in optimizer and scheduler states on tpu we have to take some extra precautions to properly load the states on the right device optimizer checkpoint was saved with smp 1 10 optimizer checkpoint was saved with smp 1 10 we use the cpu when training on one gpu to avoid oom for gpu ram when training big models in distributed training however we load directly on each gpu and risk the gpu oom as it s more likely to get oom on cpu since we load numgpu times the optimizer state launch an hyperparameter search using optuna or ray tune or sigopt the optimized quantity is determined by computeobjective which defaults to a function returning the evaluation loss when no metric is provided the sum of all metrics otherwise tip warningtrue to use this method you need to have provided a modelinit when initializing your trainer we need to reinitialize the model at each new run this is incompatible with the optimizers argument so you need to subclass trainer and override the method trainer createoptimizerandscheduler for custom optimizerscheduler tip args hpspace callableoptuna trial dictstr float optional a function that defines the hyperparameter search space will default to trainerutils defaulthpspaceoptuna or trainerutils defaulthpspaceray or trainerutils defaulthpspacesigopt depending on your backend computeobjective callabledictstr float float optional a function computing the objective to minimize or maximize from the metrics returned by the evaluate method will default to trainerutils defaultcomputeobjective ntrials int optional defaults to 100 the number of trial runs to test direction str or liststr optional defaults to minimize if it s single objective 
optimization, direction is a str and can be "minimize" or "maximize": pick "minimize" when optimizing the validation loss, "maximize" when optimizing one or several metrics. If it's multi-objective optimization, direction is a list[str] that can mix "minimize" and "maximize", chosen the same way per objective. backend (str or training_utils.HPSearchBackend, optional): the backend to use for hyperparameter search; will default to optuna, Ray Tune or SigOpt depending on which one is installed (if all are installed, it will default to optuna). hp_name (Callable[[optuna.Trial], str], optional): a function that defines the trial/run name; will default to None. kwargs (dict[str, Any], optional): additional keyword arguments passed along to optuna.create_study or ray.tune.run; for more information see the documentation of optuna.create_study (https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html), the documentation of tune.run (https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run) and the documentation of SigOpt (https://app.sigopt.com/docs/endpoints/experiments/create). Returns: trainer_utils.BestRun or list[trainer_utils.BestRun], all the information about the best run or best runs (for multi-objective optimization); for the Ray backend the experiment summary can be found in the run_summary attribute.

log logs the given values on the various objects watching training; subclass and override this method to inject custom behavior. Args: logs (dict[str, float]): the values to log.

_prepare_input prepares one piece of data before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. NLP models' inputs are int/uint and get adjusted to the right dtype of the embedding; other models, such as Wav2Vec2, have inputs that are already float and may need special handling to match the dtypes of the model. _prepare_inputs prepares inputs before feeding them to the model, converting them to tensors if they are not already and handling potential state. A helper wrapper groups together context managers, and another helper wrapper creates an appropriate context manager for autocast, feeding it the desired arguments depending on the situation.

training_step performs a training step on a batch of inputs; subclass and override to inject custom behavior. Args: model (nn.Module): the model to train. inputs (dict[str, Union[torch.Tensor, Any]]): the inputs and targets of the model; the dictionary will be unpacked before being fed to the model, and most models expect the targets under the argument labels (check your model's documentation for all accepted arguments). Return: torch.Tensor, the tensor with the training loss on this batch.

compute_loss is how the loss is computed by Trainer; by default all models return the loss in the first element. Subclass and override for custom behavior. It saves the past state if it exists (TODO: this needs to be fixed and made cleaner later) and doesn't use .loss, since the model may return tuples instead of a ModelOutput.

is_local_process_zero: whether or not this process is the local (e.g. on one machine, if training in a distributed fashion on several machines) main process. is_world_process_zero: whether or not this process is the global main process; when training in a distributed fashion on several machines this is only going to be True for one process, with a special case for SageMaker ModelParallel, since there process_index is dp_process_index, not the global process index.

save_model will save the model so you can reload it using from_pretrained, and will only save from the main process. Calling state_dict needs to be done on the wrapped model and on all processes; a user_content.pt file indicates a model state_dict saved with SMP >= 1.10. The dummy state_dict is then removed and the model is pushed to the Hub.
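A hedged usage sketch for the hyperparameter search entry point documented above, assuming optuna is installed and that `trainer` was constructed with a model_init function (both are assumptions, not shown here):

```python
# Hedged sketch: optuna-backed hyperparameter search over learning rate and batch size.
def optuna_hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [8, 16, 32]
        ),
    }

best_run = trainer.hyperparameter_search(  # `trainer` assumed to exist with model_init
    hp_space=optuna_hp_space,
    n_trials=20,
    direction="minimize",
    backend="optuna",
)
```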
When save_model is called by the user, it saves a trained model and configuration using save_pretrained, so they can then be reloaded using from_pretrained; if we are executing this function we are process zero, so we don't check for that. It is good practice to save your training arguments together with the trained model, and store_flos stores the number of floating-point operations that went into the model. When rotating checkpoints we make sure we don't delete the best model, then check whether we should delete older checkpoint(s); with save_total_limit=1 and load_best_model_at_end=True we could end up deleting the last checkpoint, which we don't do, to allow resuming.

evaluate runs evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init compute_metrics argument); you can also subclass and override this method to inject custom behavior. Args: eval_dataset (Dataset, optional): pass a dataset if you wish to override self.eval_dataset; if it is a datasets.Dataset, columns not accepted by the model.forward() method are automatically removed, and it must implement the __len__ method. ignore_keys (list[str], optional): a list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (str, optional, defaults to "eval"): an optional prefix to be used as the metrics key prefix; for example the metric "bleu" will be named "eval_bleu" if the prefix is "eval" (default). Returns a dictionary containing the evaluation loss and the potential metrics computed from the predictions; the dictionary also contains the epoch number, which comes from the training state. Memory metrics must be set up as early as possible; there is no point gathering the predictions if there are no metrics, otherwise we defer to self.args.prediction_loss_only, and debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) are logged on TPU.

predict runs prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels; in that case this method will also return metrics, like in evaluate(). Args: test_dataset (Dataset): dataset to run the predictions on; if it is a datasets.Dataset, columns not accepted by the model.forward() method are automatically removed, and it has to implement the method __len__. ignore_keys (list[str], optional): a list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (str, optional, defaults to "test"): an optional prefix to be used as the metrics key prefix; for example the metric "bleu" will be named "test_bleu" if the prefix is "test" (default). Tip: if your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding in a token classification task), the predictions will be padded on the right to allow for concatenation into one array; the padding index is -100. Returns a NamedTuple with the following keys: predictions (np.ndarray), the predictions on test_dataset; label_ids (np.ndarray, optional), the labels, if the dataset contained some; metrics (dict[str, float], optional), the potential dictionary of metrics, if the dataset contained labels. Memory metrics must be set up as early as possible.

evaluation_loop is the prediction/evaluation loop shared by Trainer.evaluate() and Trainer.predict(), and works both with and without labels. If eval is called without train, model prep is handled here; for the rest of this function, model is the outside model, whether it was wrapped or not (backward compatibility). If full fp16 or bf16 eval is wanted
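A hedged usage sketch for the evaluate/predict API documented above, assuming `trainer` and an already tokenized `test_dataset` exist and that the model is a classifier whose logits can be argmax-ed (all assumptions):

```python
import numpy as np

# Hedged sketch of a task-dependent compute_metrics function and of consuming
# evaluate()/predict() with metric key prefixes.
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return {"accuracy": float((predictions == labels).mean())}

metrics = trainer.evaluate(metric_key_prefix="eval")        # keys look like "eval_loss"
outputs = trainer.predict(test_dataset, metric_key_prefix="test")
print(outputs.predictions.shape, outputs.metrics)
```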
and this evaluation or predict isn t called while train is running cast it to the right dtype first and then put on device do this before wrapping initialize containers lossespredslabels on gputpu accumulated for evalaccumulationsteps lossespredslabels on cpu final containers will be useful when we have an iterable dataset so don t know its length main evaluation loop update the observed num examples for batch samplers batchsize is not known by the dataloader in advance prediction step update containers on host gather all tensors and put them back on the cpu if we have done enough accumulation steps set back to none to begin a new accumulation after all calls to gatherfunction reset to gatherformetrics clean the state at the end of the evaluation loop gather all remaining tensors and put them back on the cpu number of samples the instance check is weird and does not actually check for the type but whether the dataset has the right methods therefore we need to make sure it also has the attribute metrics to be jsonserializable we need to remove numpy types or zerod tensors prefix all keys with metrickeyprefix gather value of tensors tensor or listtuple of nested tensors and convert them to numpy before concatenating them to gathered perform an evaluation step on model using inputs subclass and override to inject custom behavior args model nn module the model to evaluate inputs dictstr uniontorch tensor any the inputs and targets of the model the dictionary will be unpacked before being fed to the model most models expect the targets under the argument labels check your model s documentation for all accepted arguments predictionlossonly bool whether or not to return the loss only ignorekeys liststr optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions return tupleoptionaltorch tensor optionaltorch tensor optionaltorch tensor a tuple with the loss logits and labels each being optional for cliplike models capable of returning loss values if returnloss is not specified or being none in inputs we check if the default value of returnloss is true in model forward labels may be popped when computing the loss label smoothing for instance so we grab them first todo this needs to be fixed and made cleaner later for models that inherit from pretrainedmodel uses that method to compute the number of floating point operations for every backward forward pass if using another model either implement such a method in the model or subclass and override this method args inputs dictstr uniontorch tensor any the inputs and targets of the model returns int the number of floatingpoint operations initializes a git repo in self args hubmodelid only on process zero creates a draft of a model card using the information available to the trainer args language str optional the language of the model if applicable license str optional the license of the model will default to the license of the pretrained model used if the original model given to the trainer comes from a repo on the hub tags str or liststr optional some tags to be included in the metadata of the model card modelname str optional the name of the model finetunedfrom str optional the name of the model used to finetune this one if applicable will default to the name of the repo of the original model given to the trainer if it comes from the hub tasks str or liststr optional one or several task identifiers to be included in the metadata of the model card datasettags str or liststr optional one 
or several dataset tags to be included in the metadata of the model card dataset str or liststr optional one or several dataset identifiers to be included in the metadata of the model card datasetargs str or liststr optional one or several dataset arguments to be included in the metadata of the model card only push from one node if we haven t finished the last push we don t do this one unless args hubalwayspushtrue to avoid a new synchronization of all model weights we just copy the file from the checkpoint folder saving the tokenizer is fast and we don t know how many files it may have spawned so we resave it to be sure same for the training arguments upload self model and self tokenizer to the model hub on the repo self args hubmodelid parameters commitmessage str optional defaults to end of training message to commit while pushing blocking bool optional defaults to true whether the function should return only when the git push has finished kwargs dictstr any optional additional keyword arguments passed along to trainer createmodelcard returns the url of the repository where the model was pushed if blockingfalse or a future object tracking the progress of the commit if blockingtrue in case the user calls this method with args pushtohub false needs to be executed on all processes for tpu training but will only save on the processed determined by self args shouldsave only push from one node wait for the current upload to be finished deprecated code predictionevaluation loop shared by trainer evaluate and trainer predict works both with or without labels if eval is called wo train handle model prep here for the rest of this function model is the outside model whether it was wrapped or not backward compatibility if full fp16 or bf16 eval is wanted and this evaluation or predict isn t called while train is running cast it to the right dtype first and then put on device the actual number of evalsample can be greater than numexamples in distributed settings when we pass a batch size to the sampler gather all tensors and put them back on the cpu if we have done enough accumulation steps set back to none to begin a new accumulation clean the state at the end of the evaluation loop gather all remaining tensors and put them back on the cpu to be jsonserializable we need to remove numpy types or zerod tensors prefix all keys with metrickeyprefix gather value of tensors tensor or listtuple of nested tensors and convert them to numpy before concatenating them to gathered add sagemaker checkpointing patterns to gitignore file make sure we only do this on the main process if not self isworldprocesszero return patterns sagemakeruploading sagemakeruploaded get current gitignore content if os path existsos path joinself repo localdir gitignore with openos path joinself repo localdir gitignore r as f currentcontent f read else currentcontent add the patterns to gitignore content currentcontent for pattern in patterns if pattern not in content if content endswithn content pattern else content fnpattern write the gitignore file if it has changed if content currentcontent with openos path joinself repo localdir gitignore w as f logger debugfwriting gitignore file content content f writecontent self repo gitadd gitignore avoid race condition with git status time sleep0 5 if not self repo isrepoclean self repo gitcommitadd sagemaker patterns to gitignore self repo gitpush def createacceleratorandpostprocessself gradacckwargs numsteps self args gradientaccumulationsteps gradacckwargssyncwithdataloader false 
gradientaccumulationplugin gradientaccumulationplugingradacckwargs create accelerator object self accelerator accelerator dispatchbatchesself args dispatchbatches splitbatchesself args splitbatches deepspeedpluginself args deepspeedplugin gradientaccumulationplugingradientaccumulationplugin some trainer classes need to use gather instead of gatherformetrics thus we store a flag self gatherfunction self accelerator gatherformetrics deepspeed and accelerate flags covering both trainer args and accelerate launcher self isdeepspeedenabled getattrself accelerator state deepspeedplugin none is not none self isfsdpenabled getattrself accelerator state fsdpplugin none is not none post accelerator creation setup if self isfsdpenabled fsdpplugin self accelerator state fsdpplugin fsdpplugin limitallgathers self args fsdpconfig get limitallgathers fsdpplugin limitallgathers if isaccelerateavailable0 23 0 fsdpplugin activationcheckpointing self args fsdpconfig get activationcheckpointing fsdpplugin activationcheckpointing if fsdpplugin activationcheckpointing and self args gradientcheckpointing raise valueerror the activationcheckpointing in fsdp config and the gradientcheckpointing in training arg can t be set to true simultaneously please use fsdp s activationcheckpointing logic when using fsdp if self isdeepspeedenabled if getattrself args hfdeepspeedconfig none is none from transformers integrations deepspeed import hftrainerdeepspeedconfig dsplugin self accelerator state deepspeedplugin dsplugin hfdsconfig hftrainerdeepspeedconfigdsplugin hfdsconfig config dsplugin deepspeedconfig dsplugin hfdsconfig config dsplugin hfdsconfig trainerconfigprocessself args coding utf 8 2020 present the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license the trainer class to easily train a transformers from scratch or finetune it on a new task integrations must be imported before ml frameworks isort off isort on name of the files used for checkpointing trainer is a simple but feature complete training and eval loop for pytorch optimized for transformers args model pretrainedmodel or torch nn module optional the model to train evaluate or use for predictions if not provided a model_init must be passed tip trainer is optimized to work with the pretrainedmodel provided by the library you can still use your own models defined as torch nn module as long as they work the same way as the transformers models tip args trainingarguments optional the arguments to tweak for training will default to a basic instance of trainingarguments with the output_dir set to a directory named tmp_trainer in the current directory if not provided data_collator datacollator optional the function to use to form a batch from a list of elements of train_dataset or eval_dataset will default to default_data_collator if no tokenizer is provided an instance of datacollatorwithpadding otherwise train_dataset torch utils data dataset or torch utils data iterabledataset optional the dataset to use for training if it is a datasets dataset columns not accepted by the model forward method are 
automatically removed note that if it s a torch utils data iterabledataset with some randomization and you are training in a distributed fashion your iterable dataset should either use a internal attribute generator that is a torch generator for the randomization that must be identical on all processes and the trainer will manually set the seed of this generator at each epoch or have a set_epoch method that internally sets the seed of the rngs used eval_dataset union torch utils data dataset dict str torch utils data dataset optional the dataset to use for evaluation if it is a datasets dataset columns not accepted by the model forward method are automatically removed if it is a dictionary it will evaluate on each dataset prepending the dictionary key to the metric name tokenizer pretrainedtokenizerbase optional the tokenizer used to preprocess the data if provided will be used to automatically pad the inputs to the maximum length when batching inputs and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine tuned model model_init callable pretrainedmodel optional a function that instantiates the model to be used if provided each call to trainer train will start from a new instance of the model as given by this function the function may have zero argument or a single one containing the optuna ray tune sigopt trial object to be able to choose different architectures according to hyper parameters such as layer count sizes of inner layers dropout probabilities etc compute_metrics callable evalprediction dict optional the function that will be used to compute metrics at evaluation must take a evalprediction and return a dictionary string to metric values callbacks list of trainercallback optional a list of callbacks to customize the training loop will add those to the list of default callbacks detailed in here callback if you want to remove one of the default callbacks used use the trainer remove_callback method optimizers tuple torch optim optimizer torch optim lr_scheduler lambdalr optional defaults to none none a tuple containing the optimizer and the scheduler to use will default to an instance of adamw on your model and a scheduler given by get_linear_schedule_with_warmup controlled by args preprocess_logits_for_metrics callable torch tensor torch tensor torch tensor optional a function that preprocess the logits right before caching them at each evaluation step must take two tensors the logits and the labels and return the logits once processed as desired the modifications made by this function will be reflected in the predictions received by compute_metrics note that the labels second parameter will be none if the dataset does not have them important attributes model always points to the core model if using a transformers model it will be a pretrainedmodel subclass model_wrapped always points to the most external model in case one or more other modules wrap the original model this is the model that should be used for the forward pass for example under deepspeed the inner model is wrapped in deepspeed and then again in torch nn distributeddataparallel if the inner model hasn t been wrapped then self model_wrapped is the same as self model is_model_parallel whether or not a model has been switched to a model parallel mode different from data parallelism this means some of the model layers are split on different gpus place_model_on_device whether or not to automatically place the model on the device it will be set to false if model parallel 
or deepspeed is used or if the default trainingarguments place_model_on_device is overridden to return false is_in_train whether or not a model is currently running train e g when evaluate is called while in train those are used as methods of the trainer in examples seed must be set before instantiating the model when using model memory metrics must set up as early as possible set the correct log level depending on the node force device and distributed setup init explicitly warn users at this stage the model is already loaded one place to sort out whether to place the model on device or not postpone switching model to cuda when 1 mp since we are trying to fit a much bigger than 1 gpu model 2 fp16 enabled deepspeed loads the model in half the size and it doesn t need to anyway and we only use deepspeed for training at the moment 3 full bf16 or fp16 eval since the model needs to be cast to the right dtype first 4 fsdp same as mp bnb quantized models doesn t support to operation force n_gpu to 1 to avoid dataparallel as mp will manage the gpus later use self model is self model_wrapped to check if it s wrapped or not will be set to true by self _setup_loggers on first call to self log create distant repo and output directory if needed mixed precision setup mixed precision setup for sagemaker model parallel bf16 model parallelism in sagemaker currently not supported raise an error when there s mismatch between smp config and trainer argument use smp config as truth smp 1 10 does not support fp16 in trainer deepspeed and sagemaker model parallel manage their own half precision label smoothing internal variable to count flos in each process will be accumulated in self state total_flos then returned to 0 every time flos need to be logged internal variables to help with automatic batch size reduction very last torch compile activates the neftune as presented in this code https github com neelsjain neftune and paper https arxiv org abs 2310 05914 deactivates the neftune method make sure to call _activate_neftune first add a callback to the current list of transformers trainercallback args callback type or transformers trainercallback a transformers trainercallback class or an instance of a transformers trainercallback in the first case will instantiate a member of that class remove a callback from the current list of transformers trainercallback and returns it if the callback is not found returns none and no error is raised args callback type or transformers trainercallback a transformers trainercallback class or an instance of a transformers trainercallback in the first case will pop the first member of that class found in the list of callbacks returns transformers trainercallback the callback removed if found remove a callback from the current list of transformers trainercallback args callback type or transformers trainercallback a transformers trainercallback class or an instance of a transformers trainercallback in the first case will remove the first member of that class found in the list of callbacks moving a model to an xla device disconnects the tied weights so we have to retie them inspect model forward signature to keep only the arguments it accepts labels may be named label or label_ids the default data collator handles that wrap the data collator in a callable removing unused columns build the sampler returns the training torch utils data dataloader will use no sampler if train_dataset does not implement __len__ a random sampler adapted to distributed training if necessary otherwise 
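Putting the pieces of the class documentation above together, a hedged end-to-end sketch; the checkpoint name and the already tokenized `train_ds` / `eval_ds` datasets are assumptions, not part of the original text:

```python
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

# Hedged sketch: minimal Trainer wiring; dataset preparation is omitted and
# train_ds / eval_ds are assumed to be tokenized datasets.
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

args = TrainingArguments(
    output_dir="tmp_trainer",
    per_device_train_batch_size=16,
    num_train_epochs=3,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=1,
    load_best_model_at_end=True,
)
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    tokenizer=tokenizer,
)
trainer.train()
```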
subclass and override this method if you want to inject some custom behavior deprecated code returns the evaluation torch utils data dataloader subclass and override this method if you want to inject some custom behavior args eval_dataset torch utils data dataset optional if provided will override self eval_dataset if it is a datasets dataset columns not accepted by the model forward method are automatically removed it must implement __len__ returns the test torch utils data dataloader subclass and override this method if you want to inject some custom behavior args test_dataset torch utils data dataset optional the test dataset to use if it is a datasets dataset columns not accepted by the model forward method are automatically removed it must implement __len__ we use the same batch_size as for eval setup the optimizer and the learning rate scheduler we provide a reasonable default that works well if you want to use something else you can pass a tuple in the trainer s init through optimizers or subclass and override this method or create_optimizer and or create_scheduler in a subclass if smp 1 10 and fp16 is enabled we unwrap the optimizer get all parameter names that weight decay will be applied to note that some models implement their own layernorm instead of calling nn layernorm weight decay could still apply to those modules since this function only filter out instance of nn layernorm setup the optimizer we provide a reasonable default that works well if you want to use something else you can pass a tuple in the trainer s init through optimizers or subclass and override this method in a subclass returns the optimizer class and optimizer parameters based on the training arguments args args transformers training_args trainingarguments the training arguments for the training session parse args optim_args todo change dtypes back to m fp32 var bf16 kahan false once they can be cast together in torchdistx setup the scheduler the optimizer of the trainer must have been set up either before this method is called or passed as an argument args num_training_steps int the number of training steps to do helper to get number of samples in a torch utils data dataloader by accessing its dataset when dataloader dataset does not exist or has no length estimates as best it can special case for iterabledatasetshard we need to dig deeper no dataset or length estimate by length of dataloader helper to get number of tokens in a torch utils data dataloader by enumerating dataloader hp search setup code casting value to the proper type rebuild the deepspeed config to reflect the updated training parameters remove mixed precision hooks from the model conv_bn_folding is disabled as it fails in symbolic tracing resulting in ipex warnings wrapping the base model twice in a distributedmodel will raise an error train eval could be run multiple times if already wrapped don t re wrap it again mixed precision training with apex torch 1 6 multi gpu training should be after apex fp16 initialization 8bit models does not support ddp note in torch distributed mode there s no point in wrapping the model inside a distributeddataparallel as we ll be under no_grad anyways distributed training should be after apex fp16 initialization distributed training using pytorch fsdp transformer layer class to wrap apply gradient checkpointing to auto wrapped sub modules if specified wrap the base model with an outer fsdp wrapper patch xm optimizer_step should not reduce gradients in this case as fsdp does not need gradient reduction over 
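The optimizer setup described above applies weight decay to every trainable parameter except biases and LayerNorm weights. A simplified sketch of that grouping (helper name hypothetical, filtering by parameter name as the text describes):

```python
import torch
from torch import nn

# Hedged sketch of weight-decay parameter grouping: decay everything except
# biases and LayerNorm weights.
def build_optimizer(model: nn.Module, lr: float = 5e-5, weight_decay: float = 0.01):
    no_decay = ("bias", "LayerNorm.weight")
    decay_params = [
        p for n, p in model.named_parameters()
        if p.requires_grad and not any(nd in n for nd in no_decay)
    ]
    other_params = [
        p for n, p in model.named_parameters()
        if p.requires_grad and any(nd in n for nd in no_decay)
    ]
    groups = [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": other_params, "weight_decay": 0.0},
    ]
    return torch.optim.AdamW(groups, lr=lr)
```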
sharded parameters find_unused_parameters breaks checkpointing as per https github com huggingface transformers pull 4659 issuecomment 643356021 main training entry point args resume_from_checkpoint str or bool optional if a str local path to a saved checkpoint as saved by a previous instance of trainer if a bool and equals true load the last checkpoint in args output_dir as saved by a previous instance of trainer if present training will resume from the model optimizer scheduler states loaded here trial optuna trial or dict str any optional the trial run or the hyperparameter dictionary for hyperparameter search ignore_keys_for_eval list str optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions for evaluation during the training kwargs dict str any optional additional keyword arguments used to hide deprecated arguments memory metrics must set up as early as possible attach neftune hooks if necessary do_train is not a reliable argument as it might not be set and train still called so the following is a workaround this might change the seed so needs to run first model re init seed must be set before instantiating the model when using model_init reinitializes optimizer and scheduler load potential model checkpoint if model was re initialized put it on the right device and update self model_wrapped disable progress bars when uploading models during checkpoints to avoid polluting stdout data loader and number of training steps setting up training control variables number of training epochs num_train_epochs number of training steps per epoch num_update_steps_per_epoch total number of training steps to execute max_steps may be slightly incorrect if the last batch in the training dataloader has a smaller size but it s the best we can do rely on max_steps when dataloader does not have a working size setting a very large number of epochs so we go as many times as necessary over the iterator nn dataparallel model replicates the model creating new variables and module references registered here no longer work on other gpus breaking the module noqa we need to reset the scheduler as its parameters may be different on subsequent calls compute absolute values for logging eval and save if given as ratio activate gradient checkpointing if needed as the model is wrapped don t use accelerator prepare this is for unhandled cases such as fsdp xla sagemaker mp dp dataparallel ipex prepare using accelerator prepare to handle cases wherein we pass dummyscheduler such as when it is specified in deepspeed config for the rest of this function model is the outside model whether it was wrapped or not backward compatibility ckpt loading check if saved optimizer or scheduler states exist important at this point self model is the transformers model self model_wrapped is ddp transformers model deepspeed transformers model fsdp transformers model dynamo optimized module transformers model etc train check if continuing training from a checkpoint update the references use self _trial because the sigopt optuna hpo only call _hp_search_setup trial instead of passing trial parameter to train when using ddp this should be the same if the state has been saved but in case the training arguments changed it s safer to set this after the load tr_loss is a tensor to avoid synchronization of tpus through item _total_loss_scalar is updated everytime item has to be called on tr_loss and stores the sum of all losses skip the first epochs_trained epochs to get the random 
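A short hedged sketch of the resume behaviour described above; the checkpoint path is illustrative only:

```python
# Hedged sketch: True picks up the last checkpoint in args.output_dir,
# a string selects a specific checkpoint folder.
trainer.train(resume_from_checkpoint=True)
# or
trainer.train(resume_from_checkpoint="tmp_trainer/checkpoint-500")
```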
state of the dataloader at the right point we just need to begin an iteration to create the randomization of the sampler otherwise we need to call the whooooole sampler cause there is some random operation added at the very end reset the past mems state at the beginning of each epoch if necessary skip past any already trained steps if resuming training if loss is nan or inf simply add the average of previous logged losses last step in epoch but step is always smaller than gradient_accumulation_steps the or condition of is_last_step_and_steps_less_than_grad_acc is not covered in accelerate so explicitly enable sync gradients to true in that case gradient clipping deepspeed does its own clipping revert to normal clipping otherwise handling apex or full precision optimizer step delay optimizer scheduling until metrics are generated tpu comment logging debug metrics for pytorch xla compile execute times ops etc clean the state at the end of training wait for everyone to get here so we are sure the model has been saved by process 0 add remaining tr_loss delete the last checkpoint when save_total_limit 1 if it s different from the best checkpoint and process allowed to save wait for the checkpoint to be uploaded after training we make sure to retrieve back the original forward pass method for the embedding layer by removing the forward post hook if the model is on the gpu it still works if the user_content pt file exists load with the new smp api checkpoint must have been saved with the new smp api if the user_content pt file does not exist load with the old smp api checkpoint must have been saved with the old smp api required for smp to not auto translate state_dict from hf to smp is already smp release memory we load the model state dict on the cpu to avoid an oom error workaround for fsdp bug https github com pytorch pytorch issues 82963 which takes args instead of kwargs release memory load adapters following pr 24096 if train a model using peft lora assume that adapter have been saved properly we load the sharded checkpoint if the user_content pt file exists load with the new smp api checkpoint must have been saved with the new smp api if the user_content pt file does not exist load with the old smp api checkpoint must have been saved with the old smp api if train a model using peft lora assume that adapter have been saved properly load_adapter has no return value present modify it when appropriate we load the model state dict on the cpu to avoid an oom error if the model is on the gpu it still works workaround for fsdp bug https github com pytorch pytorch issues 82963 which takes args instead of kwargs all_gather mean to get average loss over all processes reset tr_loss to zero run delayed lr scheduler now that metrics are populated load rng states from checkpoint in all cases including ddp dp deepspeed self model is always a reference to the model we want to save except fullyshardedddp assert unwrap_model model is self model internal model should be a reference to self model save model checkpoint save optimizer and scheduler save rng state determine the new best metric best model checkpoint save the trainer state maybe delete some older checkpoints save rng state in non distributed training in non distributed we save the global cuda rng state will take care of dataparallel a process can arrive here before the process 0 has a chance to save the model in which case output_dir may not yet exist under zero3 model file itself doesn t get saved since it s bogus unless deepspeed config 
stage3_gather_16bit_weights_on_model_save is true save fsdp specific ckpt for resuming from ckpt deepspeed save_checkpoint above saves model optim sched save scheduler scaler if optimizer and scheduler states exist load them deepspeed loads optimizer lr_scheduler together with the model in deepspeed_init load in optimizer and scheduler states on tpu we have to take some extra precautions to properly load the states on the right device optimizer checkpoint was saved with smp 1 10 optimizer checkpoint was saved with smp 1 10 we use the cpu when training on one gpu to avoid oom for gpu ram when training big models in distributed training however we load directly on each gpu and risk the gpu oom as it s more likely to get oom on cpu since we load num_gpu times the optimizer state launch an hyperparameter search using optuna or ray tune or sigopt the optimized quantity is determined by compute_objective which defaults to a function returning the evaluation loss when no metric is provided the sum of all metrics otherwise tip warning true to use this method you need to have provided a model_init when initializing your trainer we need to reinitialize the model at each new run this is incompatible with the optimizers argument so you need to subclass trainer and override the method trainer create_optimizer_and_scheduler for custom optimizer scheduler tip args hp_space callable optuna trial dict str float optional a function that defines the hyperparameter search space will default to trainer_utils default_hp_space_optuna or trainer_utils default_hp_space_ray or trainer_utils default_hp_space_sigopt depending on your backend compute_objective callable dict str float float optional a function computing the objective to minimize or maximize from the metrics returned by the evaluate method will default to trainer_utils default_compute_objective n_trials int optional defaults to 100 the number of trial runs to test direction str or list str optional defaults to minimize if it s single objective optimization direction is str can be minimize or maximize you should pick minimize when optimizing the validation loss maximize when optimizing one or several metrics if it s multi objectives optimization direction is list str can be list of minimize and maximize you should pick minimize when optimizing the validation loss maximize when optimizing one or several metrics backend str or training_utils hpsearchbackend optional the backend to use for hyperparameter search will default to optuna or ray tune or sigopt depending on which one is installed if all are installed will default to optuna hp_name callable optuna trial str optional a function that defines the trial run name will default to none kwargs dict str any optional additional keyword arguments passed along to optuna create_study or ray tune run for more information see the documentation of optuna create_study https optuna readthedocs io en stable reference generated optuna study create_study html the documentation of tune run https docs ray io en latest tune api_docs execution html tune run the documentation of sigopt https app sigopt com docs endpoints experiments create returns trainer_utils bestrun or list trainer_utils bestrun all the information about the best run or best runs for multi objective optimization experiment summary can be found in run_summary attribute for ray backend log logs on the various objects watching training subclass and override this method to inject custom behavior args logs dict str float the values to log prepares one data 
before feeding it to the model be it a tensor or a nested list dictionary of tensors nlp models inputs are int uint and those get adjusted to the right dtype of the embedding other models such as wav2vec2 s inputs are already float and thus may need special handling to match the dtypes of the model prepare inputs before feeding them to the model converting them to tensors if they are not already and handling potential state a helper wrapper to group together context managers a helper wrapper that creates an appropriate context manager for autocast while feeding it the desired arguments depending on the situation perform a training step on a batch of inputs subclass and override to inject custom behavior args model nn module the model to train inputs dict str union torch tensor any the inputs and targets of the model the dictionary will be unpacked before being fed to the model most models expect the targets under the argument labels check your model s documentation for all accepted arguments return torch tensor the tensor with training loss on this batch mean to average on multi gpu parallel training how the loss is computed by trainer by default all models return the loss in the first element subclass and override for custom behavior save past state if it exists todo this needs to be fixed and made cleaner later we don t use loss here since the model may return tuples instead of modeloutput whether or not this process is the local e g on one machine if training in a distributed fashion on several machines main process whether or not this process is the global main process when training in a distributed fashion on several machines this is only going to be true for one process special case for sagemaker modelparallel since there process_index is dp_process_index not the global process index will save the model so you can reload it using from_pretrained will only save from the main process calling the state_dict needs to be done on the wrapped model and on all processes user_content pt indicates model state_dict saved with smp 1 10 remove the dummy state_dict push to the hub when save_model is called by the user save a trained model and configuration using save_pretrained they can then be reloaded using from_pretrained if we are executing this function we are the process zero so we don t check for that save a trained model and configuration using save_pretrained they can then be reloaded using from_pretrained good practice save your training arguments together with the trained model storing the number of floating point operations that went into the model make sure we don t delete the best model check if we should delete older checkpoint s if save_total_limit 1 with load_best_model_at_end true we could end up deleting the last checkpoint which we don t do to allow resuming run evaluation and returns metrics the calling script will be responsible for providing a method to compute metrics as they are task dependent pass it to the init compute_metrics argument you can also subclass and override this method to inject custom behavior args eval_dataset dataset optional pass a dataset if you wish to override self eval_dataset if it is a datasets dataset columns not accepted by the model forward method are automatically removed it must implement the __len__ method ignore_keys list str optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions metric_key_prefix str optional defaults to eval an optional prefix to be used as the metrics 
key prefix for example the metrics bleu will be named eval_bleu if the prefix is eval default returns a dictionary containing the evaluation loss and the potential metrics computed from the predictions the dictionary also contains the epoch number which comes from the training state memory metrics must set up as early as possible no point gathering the predictions if there are no metrics otherwise we defer to self args prediction_loss_only tpu comment logging debug metrics for pytorch xla compile execute times ops etc run prediction and returns predictions and potential metrics depending on the dataset and your use case your test dataset may contain labels in that case this method will also return metrics like in evaluate args test_dataset dataset dataset to run the predictions on if it is an datasets dataset columns not accepted by the model forward method are automatically removed has to implement the method __len__ ignore_keys list str optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions metric_key_prefix str optional defaults to test an optional prefix to be used as the metrics key prefix for example the metrics bleu will be named test_bleu if the prefix is test default tip if your predictions or labels have different sequence length for instance because you re doing dynamic padding in a token classification task the predictions will be padded on the right to allow for concatenation into one array the padding index is 100 tip returns namedtuple a namedtuple with the following keys predictions np ndarray the predictions on test_dataset label_ids np ndarray optional the labels if the dataset contained some metrics dict str float optional the potential dictionary of metrics if the dataset contained labels memory metrics must set up as early as possible prediction evaluation loop shared by trainer evaluate and trainer predict works both with or without labels if eval is called w o train handle model prep here for the rest of this function model is the outside model whether it was wrapped or not backward compatibility if full fp16 or bf16 eval is wanted and this evaluation or predict isn t called while train is running cast it to the right dtype first and then put on device do this before wrapping initialize containers losses preds labels on gpu tpu accumulated for eval_accumulation_steps losses preds labels on cpu final containers will be useful when we have an iterable dataset so don t know its length main evaluation loop update the observed num examples for batch samplers batch_size is not known by the dataloader in advance prediction step update containers on host gather all tensors and put them back on the cpu if we have done enough accumulation steps set back to none to begin a new accumulation after all calls to gather_function reset to gather_for_metrics clean the state at the end of the evaluation loop gather all remaining tensors and put them back on the cpu number of samples the instance check is weird and does not actually check for the type but whether the dataset has the right methods therefore we need to make sure it also has the attribute both len dataloader dataset and len dataloader fail metrics to be json serializable we need to remove numpy types or zero d tensors prefix all keys with metric_key_prefix _ gather value of tensors tensor or list tuple of nested tensors and convert them to numpy before concatenating them to gathered perform an evaluation step on model using inputs subclass and override to 
perform an evaluation step on model using inputs subclass and override to inject custom behavior args model nn module the model to evaluate inputs dict str union torch tensor any the inputs and targets of the model the dictionary will be unpacked before being fed to the model most models expect the targets under the argument labels check your model's documentation for all accepted arguments prediction_loss_only bool whether or not to return the loss only ignore_keys list str optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions return tuple optional torch tensor optional torch tensor optional torch tensor a tuple with the loss logits and labels each being optional for clip-like models capable of returning loss values if return_loss is not specified or is none in inputs we check if the default value of return_loss is true in model forward labels may be popped when computing the loss label smoothing for instance so we grab them first todo this needs to be fixed and made cleaner later for models that inherit from pretrainedmodel uses that method to compute the number of floating point operations for every backward forward pass if using another model either implement such a method in the model or subclass and override this method args inputs dict str union torch tensor any the inputs and targets of the model returns int the number of floating point operations initializes a git repo in self args hub_model_id only on process zero creates a draft of a model card using the information available to the trainer args language str optional the language of the model if applicable license str optional the license of the model will default to the license of the pretrained model used if the original model given to the trainer comes from a repo on the hub tags str or list str optional some tags to be included in the metadata of the model card model_name str optional the name of the model finetuned_from str optional the name of the model used to fine tune this one if applicable will default to the name of the repo of the original model given to the trainer if it comes from the hub tasks str or list str optional one or several task identifiers to be included in the metadata of the model card dataset_tags str or list str optional one or several dataset tags to be included in the metadata of the model card dataset str or list str optional one or several dataset identifiers to be included in the metadata of the model card dataset_args str or list str optional one or several dataset arguments to be included in the metadata of the model card only push from one node if we haven't finished the last push we don't do this one unless args hub_always_push is true to avoid a new synchronization of all model weights we just copy the file from the checkpoint folder saving the tokenizer is fast and we don't know how many files it may have spawned so we resave it to be sure same for the training arguments upload self model and self tokenizer to the model hub on the repo self args hub_model_id parameters commit_message str optional defaults to end of training message to commit while pushing blocking bool optional defaults to true whether the function should return only when the git push has finished kwargs dict str any optional additional keyword arguments passed along to trainer create_model_card returns the url of the repository where the model was pushed if blocking false or a future object tracking the progress of the commit if blocking true
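As a rough illustration of the model-card and hub-upload methods documented above: a hedged sketch, assuming `trainer` was created with `push_to_hub=True` (or a configured `hub_model_id`) and has already been trained; all argument values are illustrative, not taken from this file.

```python
# Hedged sketch, typically run after trainer.train(); values are illustrative.
trainer.create_model_card(
    language="en",
    license="apache-2.0",
    model_name="my-finetuned-model",     # hypothetical name
    finetuned_from="bert-base-uncased",
    tasks="text-classification",
    dataset="glue",
    dataset_args="mrpc",
)
# Pushes self.model and self.tokenizer to self.args.hub_model_id; the return
# value depends on `blocking`, as described in the docstring above.
trainer.push_to_hub(commit_message="End of training", blocking=True)
```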
on all processes for tpu training but will only save on the processed determined by self args should_save only push from one node wait for the current upload to be finished deprecated code prediction evaluation loop shared by trainer evaluate and trainer predict works both with or without labels if eval is called w o train handle model prep here for the rest of this function model is the outside model whether it was wrapped or not backward compatibility if full fp16 or bf16 eval is wanted and this evaluation or predict isn t called while train is running cast it to the right dtype first and then put on device the actual number of eval_sample can be greater than num_examples in distributed settings when we pass a batch size to the sampler gather all tensors and put them back on the cpu if we have done enough accumulation steps set back to none to begin a new accumulation clean the state at the end of the evaluation loop gather all remaining tensors and put them back on the cpu to be json serializable we need to remove numpy types or zero d tensors prefix all keys with metric_key_prefix _ gather value of tensors tensor or list tuple of nested tensors and convert them to numpy before concatenating them to gathered add sagemaker checkpointing patterns to gitignore file make sure we only do this on the main process get current gitignore content add the patterns to gitignore write the gitignore file if it has changed avoid race condition with git status create accelerator object some trainer classes need to use gather instead of gather_for_metrics thus we store a flag deepspeed and accelerate flags covering both trainer args and accelerate launcher post accelerator creation setup
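Before the implementation that follows, a minimal sketch of the "subclass and override" pattern the docstrings above suggest for injecting custom behavior into the prediction/evaluation loop; `MyTrainer` and the logit post-processing are illustrative assumptions, not part of the library.

```python
import torch
from transformers import Trainer


class MyTrainer(Trainer):
    # Hedged sketch: delegate to the stock prediction_step, then post-process.
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        loss, logits, labels = super().prediction_step(
            model, inputs, prediction_loss_only, ignore_keys=ignore_keys
        )
        if logits is not None and isinstance(logits, torch.Tensor):
            logits = logits.float()  # e.g. upcast half-precision logits before metrics
        return loss, logits, labels
```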
import contextlib import copy import functools import glob import importlib.metadata import inspect import math import os import random import re import shutil import sys import time import warnings from collections.abc import Mapping from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union from .integrations import ( get_reporting_integration_callbacks, hp_params, ) import huggingface_hub.utils as hf_hub_utils import numpy as np import torch import torch.distributed as dist from huggingface_hub import create_repo, upload_folder from packaging import version from torch import nn from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler from . import __version__ from .configuration_utils import PretrainedConfig from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator from .debug_utils import DebugOption, DebugUnderflowOverflow from .hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS, default_hp_search_backend from .integrations.deepspeed import deepspeed_init, deepspeed_load_checkpoint, is_deepspeed_available from .modelcard import TrainingSummary from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES from .optimization import Adafactor, get_scheduler from .pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_less_than_1_11 from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( CallbackHandler, DefaultFlowCallback, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState, ) from .trainer_pt_utils import ( DistributedTensorGatherer, IterableDatasetShard, LabelSmoother, LengthGroupedSampler, SequentialDistributedSampler, distributed_broadcast_scalars, distributed_concat, find_batch_size, get_dataloader_sampler, get_model_param_count, get_module_class_from_name, get_parameter_names, nested_concat, nested_detach, nested_numpify, nested_xla_mesh_reduce, reissue_pt_warnings, remove_dummy_checkpoint, ) from .trainer_utils import ( PREFIX_CHECKPOINT_DIR, BestRun, EvalLoopOutput, EvalPrediction, HPSearchBackend, HubStrategy, IntervalStrategy, PredictionOutput, RemoveColumnsCollator, TrainerMemoryTracker, TrainOutput, default_compute_objective, denumpify_detensorize, enable_full_determinism, find_executable_batch_size, get_last_checkpoint, has_length, neftune_post_forward_hook, number_of_arguments, seed_worker, set_seed, speed_metrics, ) from .training_args import OptimizerNames, ParallelMode, TrainingArguments from .utils import ( ADAPTER_CONFIG_NAME, ADAPTER_SAFE_WEIGHTS_NAME, ADAPTER_WEIGHTS_NAME, CONFIG_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, PushInProgress, can_return_loss, find_labels, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_datasets_available, is_in_notebook, is_ipex_available, is_peft_available, is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_compile_available, is_torch_neuroncore_available, is_torch_npu_available, is_torch_tpu_available, logging, strtobool, ) from .utils.quantization_config import QuantizationMethod DEFAULT_CALLBACKS = [DefaultFlowCallback] DEFAULT_PROGRESS_CALLBACK = ProgressCallback if is_in_notebook(): from .utils.notebook import NotebookProgressCallback DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback if is_apex_available(): from apex import amp if 
is_datasets_available(): import datasets if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met if is_sagemaker_mp_enabled(): import smdistributed.modelparallel.torch as smp from smdistributed.modelparallel import __version__ as SMP_VERSION IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): import safetensors.torch if is_peft_available(): from peft import PeftModel if is_accelerate_available(): from accelerate import Accelerator, skip_first_batches from accelerate import __version__ as accelerate_version from accelerate.utils import ( DistributedDataParallelKwargs, GradientAccumulationPlugin, load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer, ) DATA_SAMPLERS = [RandomSampler] if version.parse(accelerate_version) > version.parse("0.23.0"): from accelerate.data_loader import SeedableRandomSampler DATA_SAMPLERS += [SeedableRandomSampler] if is_deepspeed_available(): from accelerate.utils import DeepSpeedSchedulerWrapper if TYPE_CHECKING: import optuna logger = logging.get_logger(__name__) TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" OPTIMIZER_NAME_BIN = "optimizer.bin" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" FSDP_MODEL_NAME = "pytorch_model_fsdp" class Trainer: from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state def __init__( self, model: Union[PreTrainedModel, nn.Module] = None, args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ): if args is None: output_dir = "tmp_trainer" logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.") args = TrainingArguments(output_dir=output_dir) self.args = args enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.hp_name = None self.deepspeed = None self.is_in_train = False self.create_accelerator_and_postprocess() self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) self._memory_tracker.start() log_level = args.get_process_log_level() logging.set_verbosity(log_level) args._setup_devices if model is None: if model_init is not None: self.model_init = model_init model = self.call_model_init() else: raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument") else: if model_init is not None: warnings.warn( "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will" " overwrite your model when calling the `train` method. 
This will become a fatal error in the next" " release.", FutureWarning, ) self.model_init = model_init if model.__class__.__name__ in MODEL_MAPPING_NAMES: raise ValueError( f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only " "computes hidden states and does not accept any labels. You should choose a model with a head " "suitable for your task like any of the `AutoModelForXxx` listed at " "https://huggingface.co/docs/transformers/model_doc/auto" ) if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel: self.is_model_parallel = True else: self.is_model_parallel = False if getattr(model, "hf_device_map", None) is not None: devices = [device for device in set(model.hf_device_map.values()) if device not in ["cpu", "disk"]] if len(devices) > 1: self.is_model_parallel = True elif len(devices) == 1: self.is_model_parallel = self.args.device != torch.device(devices[0]) else: self.is_model_parallel = False if self.is_model_parallel: logger.info( "You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set" " to `True` to avoid any unexpected behavior such as device placement mismatching." ) _is_peft_model = is_peft_available() and isinstance(model, PeftModel) _is_quantized_and_base_model = getattr(model, "is_quantized", False) and not getattr( model, "_hf_peft_config_loaded", False ) if _is_quantized_and_base_model and not _is_peft_model: raise ValueError( "You cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of" " the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft" " for more details" ) elif _is_quantized_and_base_model and not getattr(model, "_is_quantized_training_enabled", False): raise ValueError( "The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit" " model, please make sure that you have installed `bitsandbytes>=0.37.0`. " ) self.is_fsdp_xla_enabled = args.fsdp_config["xla"] if len(args.fsdp) > 0: if self.is_deepspeed_enabled: raise ValueError( "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags." 
) if not args.fsdp_config["xla"] and args.parallel_mode != ParallelMode.DISTRIBUTED: raise ValueError("Using fsdp only works in distributed training.") self.place_model_on_device = args.place_model_on_device if ( self.is_model_parallel or self.is_deepspeed_enabled or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train) or self.is_fsdp_xla_enabled or self.is_fsdp_enabled ): self.place_model_on_device = False default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer) self.data_collator = data_collator if data_collator is not None else default_collator self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.tokenizer = tokenizer if ( self.place_model_on_device and not getattr(model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES ): self._move_model_to_device(model, args.device) if self.is_model_parallel: self.args._n_gpu = 1 self.model_wrapped = model self.model = model self.neftune_noise_alpha = args.neftune_noise_alpha self.compute_metrics = compute_metrics self.preprocess_logits_for_metrics = preprocess_logits_for_metrics self.optimizer, self.lr_scheduler = optimizers if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError( "Passing a `model_init` is incompatible with providing the `optimizers` argument. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) if is_torch_tpu_available() and self.optimizer is not None: for param in self.model.parameters(): model_device = param.device break for param_group in self.optimizer.param_groups: if len(param_group["params"]) > 0: optimizer_device = param_group["params"][0].device break if model_device != optimizer_device: raise ValueError( "The model and the optimizer parameters are not on the same device, which probably means you" " created an optimizer around your model **before** putting on the device and passing it to the" " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." ) if (self.is_deepspeed_enabled or self.is_fsdp_xla_enabled or self.is_fsdp_enabled) and ( self.optimizer is not None or self.lr_scheduler is not None ): raise RuntimeError( "Passing `optimizers` is not allowed if Deepspeed or PyTorch FSDP is enabled. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." 
) default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks self.callback_handler = CallbackHandler( callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler ) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) self._loggers_initialized = False self.hub_model_id = None if self.args.push_to_hub: self.init_hf_repo() if self.args.should_save: os.makedirs(self.args.output_dir, exist_ok=True) if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).") if args.max_steps > 0: logger.info("max_steps is given, it will override any value given in num_train_epochs") if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0: raise ValueError( "The train_dataset does not implement __len__, max_steps has to be specified. " "The number of steps needs to be known in advance for the learning rate scheduler." ) if ( train_dataset is not None and isinstance(train_dataset, torch.utils.data.IterableDataset) and args.group_by_length ): raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset") self._signature_columns = None self.use_apex = False self.use_cpu_amp = False if is_sagemaker_mp_enabled(): if args.bf16: raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ") if IS_SAGEMAKER_MP_POST_1_10: if args.fp16 != smp.state.cfg.fp16: logger.warning( f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " f"but FP16 provided in trainer argument is {args.fp16}, " f"setting to {smp.state.cfg.fp16}" ) args.fp16 = smp.state.cfg.fp16 else: if hasattr(smp.state.cfg, "fp16"): logger.warning( f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer." ) if (args.fp16 or args.bf16) and args.half_precision_backend == "auto": if args.device == torch.device("cpu"): if args.fp16: raise ValueError("Tried to use `fp16` but it is not supported on cpu") else: args.half_precision_backend = "cpu_amp" logger.info(f"Using {args.half_precision_backend} half precision backend") if (args.fp16 or args.bf16) and not (self.is_deepspeed_enabled or is_sagemaker_mp_enabled()): if args.half_precision_backend == "cpu_amp": self.use_cpu_amp = True self.amp_dtype = torch.bfloat16 elif args.half_precision_backend == "apex": if not is_apex_available(): raise ImportError( "Using FP16 with APEX but APEX is not installed, please refer to" " https://www.github.com/nvidia/apex." 
) self.use_apex = True if self.args.label_smoothing_factor != 0: self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor) else: self.label_smoother = None self.state = TrainerState( is_local_process_zero=self.is_local_process_zero(), is_world_process_zero=self.is_world_process_zero(), ) self.control = TrainerControl() self.current_flos = 0 self.hp_search_backend = None self.use_tune_checkpoints = False default_label_names = find_labels(self.model.__class__) self.label_names = default_label_names if self.args.label_names is None else self.args.label_names self.can_return_loss = can_return_loss(self.model.__class__) self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) self._train_batch_size = args.train_batch_size self._created_lr_scheduler = False self._memory_tracker.stop_and_update_metrics() if args.torch_compile and not is_torch_compile_available(): raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.") def _activate_neftune(self, model): r unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() del unwrapped_model embeddings.neftune_noise_alpha = self.neftune_noise_alpha hook_handle = embeddings.register_forward_hook(neftune_post_forward_hook) self.neftune_hook_handle = hook_handle return model def _deactivate_neftune(self, model): if not hasattr(self, "neftune_hook_handle"): raise ValueError("Neftune is not activated make sure to call `trainer._activate_neftune()` first") unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): embeddings = unwrapped_model.base_model.model.get_input_embeddings() else: embeddings = unwrapped_model.get_input_embeddings() self.neftune_hook_handle.remove() del embeddings.neftune_noise_alpha, unwrapped_model def add_callback(self, callback): self.callback_handler.add_callback(callback) def pop_callback(self, callback): return self.callback_handler.pop_callback(callback) def remove_callback(self, callback): self.callback_handler.remove_callback(callback) def _move_model_to_device(self, model, device): model = model.to(device) if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"): model.tie_weights() def _set_signature_columns_if_needed(self): if self._signature_columns is None: signature = inspect.signature(self.model.forward) self._signature_columns = list(signature.parameters.keys()) self._signature_columns += list(set(["label", "label_ids"] + self.label_names)) def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): if not self.args.remove_unused_columns: return dataset self._set_signature_columns_if_needed() signature_columns = self._signature_columns ignored_columns = list(set(dataset.column_names) - set(signature_columns)) if len(ignored_columns) > 0: dset_description = "" if description is None else f"in the {description} set" logger.info( f"The following columns {dset_description} don't have a corresponding argument in " f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, " " you can safely ignore this message." 
) columns = [k for k in signature_columns if k in dataset.column_names] if version.parse(datasets.__version__) < version.parse("1.4.0"): dataset.set_format( type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"] ) return dataset else: return dataset.remove_columns(ignored_columns) def _get_collator_with_removed_columns( self, data_collator: Callable, description: Optional[str] = None ) -> Callable: if not self.args.remove_unused_columns: return data_collator self._set_signature_columns_if_needed() signature_columns = self._signature_columns remove_columns_collator = RemoveColumnsCollator( data_collator=data_collator, signature_columns=signature_columns, logger=logger, description=description, model_name=self.model.__class__.__name__, ) return remove_columns_collator def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: if self.train_dataset is None or not has_length(self.train_dataset): return None if self.args.group_by_length: if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset): lengths = ( self.train_dataset[self.args.length_column_name] if self.args.length_column_name in self.train_dataset.column_names else None ) else: lengths = None model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None return LengthGroupedSampler( self.args.train_batch_size * self.args.gradient_accumulation_steps, dataset=self.train_dataset, lengths=lengths, model_input_name=model_input_name, ) else: return RandomSampler(self.train_dataset) def get_train_dataloader(self) -> DataLoader: if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") train_dataset = self.train_dataset data_collator = self.data_collator if is_datasets_available() and isinstance(train_dataset, datasets.Dataset): train_dataset = self._remove_unused_columns(train_dataset, description="training") else: data_collator = self._get_collator_with_removed_columns(data_collator, description="training") dataloader_params = { "batch_size": self._train_batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(train_dataset, torch.utils.data.IterableDataset): dataloader_params["sampler"] = self._get_train_sampler() dataloader_params["drop_last"] = self.args.dataloader_drop_last dataloader_params["worker_init_fn"] = seed_worker return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params)) def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]: if self.args.use_legacy_prediction_loop: if is_torch_tpu_available(): return SequentialDistributedSampler( eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal() ) elif is_sagemaker_mp_enabled(): return SequentialDistributedSampler( eval_dataset, num_replicas=smp.dp_size(), rank=smp.dp_rank(), batch_size=self.args.per_device_eval_batch_size, ) else: return SequentialSampler(eval_dataset) if self.args.world_size <= 1: return SequentialSampler(eval_dataset) else: return None def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset data_collator = self.data_collator if is_datasets_available() and isinstance(eval_dataset, 
datasets.Dataset): eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation") else: data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation") dataloader_params = { "batch_size": self.args.eval_batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(eval_dataset, torch.utils.data.IterableDataset): dataloader_params["sampler"] = self._get_eval_sampler(eval_dataset) dataloader_params["drop_last"] = self.args.dataloader_drop_last return self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: data_collator = self.data_collator if is_datasets_available() and isinstance(test_dataset, datasets.Dataset): test_dataset = self._remove_unused_columns(test_dataset, description="test") else: data_collator = self._get_collator_with_removed_columns(data_collator, description="test") dataloader_params = { "batch_size": self.args.eval_batch_size, "collate_fn": data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "persistent_workers": self.args.dataloader_persistent_workers, } if not isinstance(test_dataset, torch.utils.data.IterableDataset): dataloader_params["sampler"] = self._get_eval_sampler(test_dataset) dataloader_params["drop_last"] = self.args.dataloader_drop_last return self.accelerator.prepare(DataLoader(test_dataset, **dataloader_params)) def create_optimizer_and_scheduler(self, num_training_steps: int): self.create_optimizer() if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16: optimizer = self.optimizer.optimizer else: optimizer = self.optimizer self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer) def get_decay_parameter_names(self, model) -> List[str]: decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS) decay_parameters = [name for name in decay_parameters if "bias" not in name] return decay_parameters def create_optimizer(self): opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.optimizer is None: decay_parameters = self.get_decay_parameter_names(opt_model) optimizer_grouped_parameters = [ { "params": [ p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad) ], "weight_decay": self.args.weight_decay, }, { "params": [ p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad) ], "weight_decay": 0.0, }, ] optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args) self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs) if optimizer_cls.__name__ == "Adam8bit": import bitsandbytes manager = bitsandbytes.optim.GlobalOptimManager.get_instance() skipped = 0 for module in opt_model.modules(): if isinstance(module, nn.Embedding): skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) logger.info(f"skipped {module}: {skipped/2**20}M params") manager.register_module_override(module, "weight", {"optim_bits": 32}) logger.debug(f"bitsandbytes: will optimize {module} in fp32") logger.info(f"skipped: {skipped/2**20}M params") if is_sagemaker_mp_enabled(): self.optimizer = smp.DistributedOptimizer(self.optimizer) return self.optimizer @staticmethod def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]: 
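# Note: `args.optim_args` (when provided) is parsed below as a comma-separated list of
# key=value pairs, e.g. "use_kahan_summation=False,momentum_dtype=float32"; spaces are
# stripped before splitting.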
optim_args = {} if args.optim_args: for mapping in args.optim_args.replace(" ", "").split(","): key, value = mapping.split("=") optim_args[key] = value optimizer_kwargs = {"lr": args.learning_rate} adam_kwargs = { "betas": (args.adam_beta1, args.adam_beta2), "eps": args.adam_epsilon, } if args.optim == OptimizerNames.ADAFACTOR: optimizer_cls = Adafactor optimizer_kwargs.update({"scale_parameter": False, "relative_step": False}) elif args.optim == OptimizerNames.ADAMW_HF: from .optimization import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]: from torch.optim import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) if args.optim == OptimizerNames.ADAMW_TORCH_FUSED: optimizer_kwargs.update({"fused": True}) elif args.optim == OptimizerNames.ADAMW_TORCH_XLA: try: from torch_xla.amp.syncfree import AdamW optimizer_cls = AdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.") elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED: try: from torch_npu.optim import NpuFusedAdamW optimizer_cls = NpuFusedAdamW optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer failed to import FusedAdamW from torch_npu.") elif args.optim == OptimizerNames.ADAMW_APEX_FUSED: try: from apex.optimizers import FusedAdam optimizer_cls = FusedAdam optimizer_kwargs.update(adam_kwargs) except ImportError: raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!") elif args.optim in [ OptimizerNames.ADAMW_BNB, OptimizerNames.ADAMW_8BIT, OptimizerNames.PAGED_ADAMW, OptimizerNames.PAGED_ADAMW_8BIT, OptimizerNames.LION, OptimizerNames.LION_8BIT, OptimizerNames.PAGED_LION, OptimizerNames.PAGED_LION_8BIT, ]: try: from bitsandbytes.optim import AdamW, Lion is_paged = False optim_bits = 32 optimizer_cls = None additional_optim_kwargs = adam_kwargs if "paged" in args.optim: is_paged = True if "8bit" in args.optim: optim_bits = 8 if "adam" in args.optim: optimizer_cls = AdamW elif "lion" in args.optim: optimizer_cls = Lion additional_optim_kwargs = {"betas": (args.adam_beta1, args.adam_beta2)} bnb_kwargs = {"is_paged": is_paged, "optim_bits": optim_bits} optimizer_kwargs.update(additional_optim_kwargs) optimizer_kwargs.update(bnb_kwargs) except ImportError: raise ValueError("Trainer tried to instantiate bnb optimizer but bnb is not installed!") if is_bitsandbytes_available() and version.parse( importlib.metadata.version("bitsandbytes") ) < version.parse("0.41.1"): logger.warning( "You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. " "It is recommended to update your version as a major bug has been fixed in 8-bit optimizers." 
) elif args.optim == OptimizerNames.ADAMW_ANYPRECISION: try: from torchdistx.optimizers import AnyPrecisionAdamW optimizer_cls = AnyPrecisionAdamW optimizer_kwargs.update(adam_kwargs) optimizer_kwargs.update( { "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")), "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")), "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")), "compensation_buffer_dtype": getattr( torch, optim_args.get("compensation_buffer_dtype", "bfloat16") ), } ) except ImportError: raise ValueError("Please install https://github.com/pytorch/torchdistx") elif args.optim == OptimizerNames.SGD: optimizer_cls = torch.optim.SGD elif args.optim == OptimizerNames.ADAGRAD: optimizer_cls = torch.optim.Adagrad elif args.optim == OptimizerNames.RMSPROP: optimizer_cls = torch.optim.RMSprop else: raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}") return optimizer_cls, optimizer_kwargs def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None): if self.lr_scheduler is None: self.lr_scheduler = get_scheduler( self.args.lr_scheduler_type, optimizer=self.optimizer if optimizer is None else optimizer, num_warmup_steps=self.args.get_warmup_steps(num_training_steps), num_training_steps=num_training_steps, scheduler_specific_kwargs=self.args.lr_scheduler_kwargs, ) self._created_lr_scheduler = True return self.lr_scheduler def num_examples(self, dataloader: DataLoader) -> int: try: dataset = dataloader.dataset if isinstance(dataset, IterableDatasetShard): return len(dataloader.dataset.dataset) return len(dataloader.dataset) except (NameError, AttributeError, TypeError): return len(dataloader) * self.args.per_device_train_batch_size def num_tokens(self, train_dl: DataLoader, max_steps: Optional[int] = None) -> int: train_tokens = 0 try: for step, batch in enumerate(train_dl): tokens = batch["input_ids"].numel() if max_steps is not None: return tokens * max_steps train_tokens += tokens return train_tokens except KeyError: logger.warning("Cannot get num_tokens from dataloader") return train_tokens def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): self._trial = trial if self.hp_search_backend is None or trial is None: return if self.hp_search_backend == HPSearchBackend.OPTUNA: params = self.hp_space(trial) elif self.hp_search_backend == HPSearchBackend.RAY: params = trial params.pop("wandb", None) elif self.hp_search_backend == HPSearchBackend.SIGOPT: params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()} elif self.hp_search_backend == HPSearchBackend.WANDB: params = trial for key, value in params.items(): if not hasattr(self.args, key): logger.warning( f"Trying to set {key} in the hyperparameter search but there is no corresponding field in" " `TrainingArguments`." 
) continue old_attr = getattr(self.args, key, None) if old_attr is not None: value = type(old_attr)(value) setattr(self.args, key, value) if self.hp_search_backend == HPSearchBackend.OPTUNA: logger.info(f"Trial: {trial.params}") if self.hp_search_backend == HPSearchBackend.SIGOPT: logger.info(f"SigOpt Assignments: {trial.assignments}") if self.hp_search_backend == HPSearchBackend.WANDB: logger.info(f"W&B Sweep parameters: {trial}") if self.is_deepspeed_enabled: if self.args.deepspeed is None: raise ValueError("For sweeps with deepspeed, `args.deepspeed` must be set") from accelerate.utils import DeepSpeedPlugin from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed) self.args.hf_deepspeed_config.trainer_config_process(self.args) self.args.deepspeed_plugin = DeepSpeedPlugin(hf_ds_config=self.args.hf_deepspeed_config) self.create_accelerator_and_postprocess() def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]): if self.hp_search_backend is None or trial is None: return self.objective = self.compute_objective(metrics.copy()) if self.hp_search_backend == HPSearchBackend.OPTUNA: import optuna if not trial.study._is_multi_objective(): trial.report(self.objective, step) if trial.should_prune(): self.callback_handler.on_train_end(self.args, self.state, self.control) raise optuna.TrialPruned() elif self.hp_search_backend == HPSearchBackend.RAY: from ray import tune if self.control.should_save: self._tune_save_checkpoint() tune.report(objective=self.objective, **metrics) def _tune_save_checkpoint(self): from ray import tune if not self.use_tune_checkpoints: return with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir: output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") self.save_model(output_dir, _internal_call=True) if self.args.should_save: self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) def call_model_init(self, trial=None): model_init_argcount = number_of_arguments(self.model_init) if model_init_argcount == 0: model = self.model_init() elif model_init_argcount == 1: model = self.model_init(trial) else: raise RuntimeError("model_init should have 0 or 1 argument.") if model is None: raise RuntimeError("model_init should not return None.") return model def torch_jit_model_eval(self, model, dataloader, training=False): if not training: if dataloader is None: logger.warning("failed to use PyTorch jit mode due to current dataloader is none.") return model example_batch = next(iter(dataloader)) example_batch = self._prepare_inputs(example_batch) try: jit_model = copy.copy(model) jit_model.eval() original_forward = jit_model.__dict__.pop("_original_forward", None) if original_forward: jit_model.forward = original_forward with self.accelerator.autocast(cache_enabled=False), torch.no_grad(): if version.parse(version.parse(torch.__version__).base_version) >= version.parse("2.0.0"): if isinstance(example_batch, dict): jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False) else: jit_model = torch.jit.trace( jit_model, example_kwarg_inputs={key: example_batch[key] for key in example_batch}, strict=False, ) else: jit_inputs = [] for key in example_batch: example_tensor = 
torch.ones_like(example_batch[key]) jit_inputs.append(example_tensor) jit_inputs = tuple(jit_inputs) jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False) jit_model = torch.jit.freeze(jit_model) with torch.no_grad(): jit_model(**example_batch) jit_model(**example_batch) model = jit_model self.use_cpu_amp = False except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e: logger.warning(f"failed to use PyTorch jit mode due to: {e}.") return model def ipex_optimize_model(self, model, training=False, dtype=torch.float32): if not is_ipex_available(): raise ImportError( "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer" " to https://github.com/intel/intel-extension-for-pytorch." ) import intel_extension_for_pytorch as ipex if not training: model.eval() dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train) else: if not model.training: model.train() model, self.optimizer = ipex.optimize( model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1" ) return model def _wrap_model(self, model, training=True, dataloader=None): if self.args.use_ipex: dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32 model = self.ipex_optimize_model(model, training, dtype=dtype) if is_sagemaker_mp_enabled(): if isinstance(self.model_wrapped, smp.model.DistributedModel): return self.model_wrapped return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps) if unwrap_model(model) is not model: return model if self.use_apex and training: model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) if self.args.n_gpu > 1 and not getattr(model, "is_loaded_in_8bit", False): model = nn.DataParallel(model) if self.args.jit_mode_eval: start_time = time.time() model = self.torch_jit_model_eval(model, dataloader, training) self.jit_compilation_time = round(time.time() - start_time, 4) if not training: return model if self.is_fsdp_xla_enabled: try: from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP from torch_xla.distributed.fsdp import checkpoint_module from torch_xla.distributed.fsdp.wrap import ( size_based_auto_wrap_policy, transformer_auto_wrap_policy, ) except ImportError: raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.") auto_wrap_policy = None auto_wrapper_callable = None default_transformer_cls_names_to_wrap = getattr(model, "_no_split_modules", None) fsdp_transformer_layer_cls_to_wrap = self.args.fsdp_config.get( "transformer_layer_cls_to_wrap", default_transformer_cls_names_to_wrap ) if self.args.fsdp_config["min_num_params"] > 0: auto_wrap_policy = functools.partial( size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["min_num_params"] ) elif fsdp_transformer_layer_cls_to_wrap is not None: transformer_cls_to_wrap = set() for layer_class in fsdp_transformer_layer_cls_to_wrap: transformer_cls = get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception("Could not find the transformer layer class to wrap in the model.") else: transformer_cls_to_wrap.add(transformer_cls) auto_wrap_policy = functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap, ) fsdp_kwargs = self.args.xla_fsdp_config if self.args.fsdp_config["xla_fsdp_grad_ckpt"]: def auto_wrapper_callable(m, 
*args, **kwargs): return FSDP(checkpoint_module(m), *args, **kwargs) self.model = model = FSDP( model, auto_wrap_policy=auto_wrap_policy, auto_wrapper_callable=auto_wrapper_callable, **fsdp_kwargs, ) def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}): loss = optimizer.step(**optimizer_args) if barrier: xm.mark_step() return loss xm.optimizer_step = patched_optimizer_step elif is_sagemaker_dp_enabled(): model = nn.parallel.DistributedDataParallel( model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))] ) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: if is_torch_neuroncore_available(): return model kwargs = {} if self.args.ddp_find_unused_parameters is not None: kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters elif isinstance(model, PreTrainedModel): kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing else: kwargs["find_unused_parameters"] = True if self.args.ddp_bucket_cap_mb is not None: kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb if self.args.ddp_broadcast_buffers is not None: kwargs["broadcast_buffers"] = self.args.ddp_broadcast_buffers self.accelerator.ddp_handler = DistributedDataParallelKwargs(**kwargs) return model def train( self, resume_from_checkpoint: Optional[Union[str, bool]] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None, ignore_keys_for_eval: Optional[List[str]] = None, **kwargs, ): if resume_from_checkpoint is False: resume_from_checkpoint = None self._memory_tracker.start() args = self.args self.is_in_train = True if self.neftune_noise_alpha is not None: self.model = self._activate_neftune(self.model) if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train: self._move_model_to_device(self.model, args.device) if "model_path" in kwargs: resume_from_checkpoint = kwargs.pop("model_path") warnings.warn( "`model_path` is deprecated and will be removed in a future version. 
Use `resume_from_checkpoint` " "instead.", FutureWarning, ) if len(kwargs) > 0: raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") self._hp_search_setup(trial) self._train_batch_size = self.args.train_batch_size model_reloaded = False if self.model_init is not None: enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) self.model = self.call_model_init(trial) model_reloaded = True self.optimizer, self.lr_scheduler = None, None if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint: resume_from_checkpoint = get_last_checkpoint(args.output_dir) if resume_from_checkpoint is None: raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})") if ( resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and not self.is_deepspeed_enabled and not self.is_fsdp_enabled ): self._load_from_checkpoint(resume_from_checkpoint) if model_reloaded: if self.place_model_on_device: self._move_model_to_device(self.model, args.device) self.model_wrapped = self.model inner_training_loop = find_executable_batch_size( self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size ) if args.push_to_hub: try: hf_hub_utils.disable_progress_bars() return inner_training_loop( args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval, ) finally: hf_hub_utils.enable_progress_bars() else: return inner_training_loop( args=args, resume_from_checkpoint=resume_from_checkpoint, trial=trial, ignore_keys_for_eval=ignore_keys_for_eval, ) def _inner_training_loop( self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self.accelerator.free_memory() self._train_batch_size = batch_size logger.debug(f"Currently training with a batch size of: {self._train_batch_size}") train_dataloader = self.get_train_dataloader() total_train_batch_size = self._train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None num_train_tokens = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) num_train_samples = args.max_steps * total_train_batch_size if args.include_tokens_per_second: num_train_tokens = ( self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps ) else: max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs if args.include_tokens_per_second: num_train_tokens = self.num_tokens(train_dataloader) * args.num_train_epochs elif args.max_steps > 0: max_steps = args.max_steps num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size if args.include_tokens_per_second: num_train_tokens = self.num_tokens(train_dataloader, args.max_steps) * args.gradient_accumulation_steps else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not 
have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torchrun or torch.distributed.launch (deprecated))." ) else: debug_overflow = DebugUnderflowOverflow(self.model) delay_optimizer_creation = is_sagemaker_mp_enabled() or self.is_fsdp_xla_enabled or self.is_fsdp_enabled if self._created_lr_scheduler: self.lr_scheduler = None self._created_lr_scheduler = False if self.is_deepspeed_enabled: self.optimizer, self.lr_scheduler = deepspeed_init(self, num_training_steps=max_steps) if not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None if args.logging_steps is not None: if args.logging_steps < 1: self.state.logging_steps = math.ceil(max_steps * args.logging_steps) else: self.state.logging_steps = args.logging_steps if args.eval_steps is not None: if args.eval_steps < 1: self.state.eval_steps = math.ceil(max_steps * args.eval_steps) else: self.state.eval_steps = args.eval_steps if args.save_steps is not None: if args.save_steps < 1: self.state.save_steps = math.ceil(max_steps * args.save_steps) else: self.state.save_steps = args.save_steps if args.gradient_checkpointing: if args.gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} else: gradient_checkpointing_kwargs = args.gradient_checkpointing_kwargs self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model = self._wrap_model(self.model_wrapped) use_accelerator_prepare = True if model is self.model else False if delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) if use_accelerator_prepare: self.model.train() if hasattr(self.lr_scheduler, "step"): if self.use_apex: model = self.accelerator.prepare(self.model) else: model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer) else: model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( self.model, self.optimizer, self.lr_scheduler ) if self.is_fsdp_enabled: self.model = self.model_wrapped = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if resume_from_checkpoint is not None: if self.is_deepspeed_enabled: deepspeed_load_checkpoint(self.model_wrapped, resume_from_checkpoint) elif is_sagemaker_mp_enabled() or self.is_fsdp_enabled: self._load_from_checkpoint(resume_from_checkpoint, self.model_wrapped) self._load_optimizer_and_scheduler(resume_from_checkpoint) logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples:,}") logger.info(f" Num Epochs = {num_train_epochs:,}") logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}") if self.args.per_device_train_batch_size != self._train_batch_size: logger.info(f" Training with DataParallel so batch size has been adjusted to: {self._train_batch_size:,}") logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size:,}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps:,}") logger.info(f" Number of trainable parameters = {get_model_param_count(model, trainable_only=True):,}") self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info(f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first" f" {steps_trained_in_current_epoch} batches in the first epoch." ) self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() tr_loss = torch.tensor(0.0).to(args.device) self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) if not args.ignore_data_skip: for epoch in range(epochs_trained): sampler = get_dataloader_sampler(train_dataloader) sampler_kinds = [RandomSampler] if version.parse(accelerate_version) > version.parse("0.23.0"): sampler_kinds.append(SeedableRandomSampler) is_random_sampler = isinstance(sampler, tuple(sampler_kinds)) if is_torch_less_than_1_11 or not is_random_sampler: for _ in train_dataloader: break else: sampler = sampler if sampler is not None else [] _ = list(sampler) total_batched_samples = 0 for epoch in range(epochs_trained, num_train_epochs): epoch_iterator = train_dataloader if hasattr(epoch_iterator, "set_epoch"): epoch_iterator.set_epoch(epoch) if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False steps_skipped = 0 if steps_trained_in_current_epoch > 0: epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch) 
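# Note: when resuming mid-epoch (and ignore_data_skip is off), the batches already seen
# are skipped with accelerate's skip_first_batches above, and the saved RNG state is
# restored on the first fresh batch via the rng_to_sync flag set just below.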
steps_skipped = steps_trained_in_current_epoch steps_trained_in_current_epoch = 0 rng_to_sync = True step = -1 for step, inputs in enumerate(epoch_iterator): total_batched_samples += 1 if self.args.include_num_input_tokens_seen: main_input_name = getattr(self.model, "main_input_name", "input_ids") if main_input_name not in inputs: logger.warning( "Tried to track the number of tokens seen, however the current model is " "not configured properly to know what item is the input. To fix this, add " "a `main_input_name` attribute to the model class you are using." ) else: self.state.num_input_tokens_seen += self.accelerator.gather(inputs[main_input_name]).numel() if rng_to_sync: self._load_rng_state(resume_from_checkpoint) rng_to_sync = False if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) with self.accelerator.accumulate(model): tr_loss_step = self.training_step(model, inputs) if ( args.logging_nan_inf_filter and not is_torch_tpu_available() and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) is_last_step_and_steps_less_than_grad_acc = ( steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ) if ( total_batched_samples % args.gradient_accumulation_steps == 0 or is_last_step_and_steps_less_than_grad_acc ): if is_last_step_and_steps_less_than_grad_acc: self.accelerator.gradient_state._set_sync_gradients(True) if args.max_grad_norm is not None and args.max_grad_norm > 0: if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) elif self.use_apex: nn.utils.clip_grad_norm_( amp.master_params(self.optimizer), args.max_grad_norm, ) else: self.accelerator.clip_grad_norm_( model.parameters(), args.max_grad_norm, ) self.optimizer.step() optimizer_was_run = not self.accelerator.optimizer_step_was_skipped if optimizer_was_run: if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning( "There seems to be not a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." 
) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_tpu_available(): xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): delattr(self, "_past") logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.parallel_mode == ParallelMode.DISTRIBUTED: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics( "train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps, num_tokens=num_train_tokens, ) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1: for checkpoint in checkpoints_sorted: if not os.path.samefile(checkpoint, self.state.best_model_checkpoint): logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) self._finish_current_push() if self.neftune_noise_alpha is not None: self._deactivate_neftune(self.model) return TrainOutput(self.state.global_step, train_loss, metrics) def _get_output_dir(self, trial): if self.hp_search_backend is not None and trial is not None: if self.hp_search_backend == HPSearchBackend.OPTUNA: run_id = trial.number elif self.hp_search_backend == HPSearchBackend.RAY: from ray import tune run_id = tune.get_trial_id() elif self.hp_search_backend == HPSearchBackend.SIGOPT: run_id = trial.id elif self.hp_search_backend == HPSearchBackend.WANDB: import wandb run_id = wandb.run.id run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" run_dir = os.path.join(self.args.output_dir, run_name) else: run_dir = self.args.output_dir return run_dir def _load_from_checkpoint(self, resume_from_checkpoint, model=None): if model is None: model = self.model config_file = os.path.join(resume_from_checkpoint, CONFIG_NAME) adapter_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_WEIGHTS_NAME) adapter_safe_weights_file = os.path.join(resume_from_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) weights_file = os.path.join(resume_from_checkpoint, WEIGHTS_NAME) weights_index_file = os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME) safe_weights_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_NAME) safe_weights_index_file = os.path.join(resume_from_checkpoint, SAFE_WEIGHTS_INDEX_NAME) is_fsdp_ckpt = os.path.isdir(resume_from_checkpoint) and any( FSDP_MODEL_NAME in folder_name for folder_name in 
os.listdir(resume_from_checkpoint) if os.path.isdir(os.path.join(resume_from_checkpoint, folder_name)) ) if is_fsdp_ckpt and not self.is_fsdp_enabled: raise ValueError(f"Checkpoint found at {resume_from_checkpoint} is only supported when using PyTorch FSDP") if not ( any( os.path.isfile(f) for f in [ weights_file, safe_weights_file, weights_index_file, safe_weights_index_file, adapter_weights_file, adapter_safe_weights_file, ] ) or is_fsdp_ckpt ): raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}") logger.info(f"Loading model from {resume_from_checkpoint}.") if os.path.isfile(config_file): config = PretrainedConfig.from_json_file(config_file) checkpoint_version = config.transformers_version if checkpoint_version is not None and checkpoint_version != __version__: logger.warning( f"You are resuming training from a checkpoint trained with {checkpoint_version} of " f"Transformers but your current version is {__version__}. This is not recommended and could " "yield to errors or unwanted behaviors." ) if os.path.isfile(weights_file) or os.path.isfile(safe_weights_file) or is_fsdp_ckpt: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")): smp.resume_from_checkpoint( path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False ) else: if hasattr(self.args, "fp16") and self.args.fp16 is True: logger.warning( "Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported." ) state_dict = torch.load(weights_file, map_location="cpu") state_dict["_smp_is_partial"] = False load_result = model.load_state_dict(state_dict, strict=True) del state_dict elif self.is_fsdp_enabled: load_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, model, resume_from_checkpoint) else: if self.args.save_safetensors and os.path.isfile(safe_weights_file): state_dict = safetensors.torch.load_file(safe_weights_file, device="cpu") else: state_dict = torch.load(weights_file, map_location="cpu") load_result = model.load_state_dict(state_dict, False) del state_dict self._issue_warnings_after_load(load_result) elif is_peft_available() and isinstance(model, PeftModel): if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): if os.path.exists(resume_from_checkpoint): model.load_adapter(resume_from_checkpoint, model.active_adapter, is_trainable=True) else: logger.warning( "The intermediate checkpoints of PEFT may not be saved correctly, " f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. 
" "Check some examples here: https://github.com/huggingface/peft/issues/96" ) else: logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") else: load_result = load_sharded_checkpoint( model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled(), prefer_safe=self.args.save_safetensors ) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) def _load_best_model(self): logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).") best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME) best_safe_model_path = os.path.join(self.state.best_model_checkpoint, SAFE_WEIGHTS_NAME) best_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_WEIGHTS_NAME) best_safe_adapter_model_path = os.path.join(self.state.best_model_checkpoint, ADAPTER_SAFE_WEIGHTS_NAME) model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model if self.is_deepspeed_enabled: deepspeed_load_checkpoint(self.model_wrapped, self.state.best_model_checkpoint) elif self.is_fsdp_enabled: load_result = load_fsdp_model( self.accelerator.state.fsdp_plugin, self.accelerator, model, self.state.best_model_checkpoint ) elif ( os.path.exists(best_model_path) or os.path.exists(best_safe_model_path) or os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path) ): has_been_loaded = True if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")): smp.resume_from_checkpoint( path=self.state.best_model_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False, ) else: if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") else: state_dict = torch.load(best_model_path, map_location="cpu") state_dict["_smp_is_partial"] = False load_result = model.load_state_dict(state_dict, strict=True) else: if is_peft_available() and isinstance(model, PeftModel): if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): if os.path.exists(best_adapter_model_path) or os.path.exists(best_safe_adapter_model_path): model.load_adapter(self.state.best_model_checkpoint, model.active_adapter) from torch.nn.modules.module import _IncompatibleKeys load_result = _IncompatibleKeys([], []) else: logger.warning( "The intermediate checkpoints of PEFT may not be saved correctly, " f"consider using a custom callback to save {ADAPTER_WEIGHTS_NAME} in corresponding saving folders. 
" "Check some examples here: https://github.com/huggingface/peft/issues/96" ) has_been_loaded = False else: logger.warning("Could not load adapter model, make sure to have `peft>=0.3.0` installed") has_been_loaded = False else: if self.args.save_safetensors and os.path.isfile(best_safe_model_path): state_dict = safetensors.torch.load_file(best_safe_model_path, device="cpu") else: state_dict = torch.load(best_model_path, map_location="cpu") load_result = model.load_state_dict(state_dict, False) if not is_sagemaker_mp_enabled() and has_been_loaded: self._issue_warnings_after_load(load_result) elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): load_result = load_sharded_checkpoint( model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled() ) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) else: logger.warning( f"Could not locate the best model at {best_model_path}, if you are running a distributed training " "on multiple nodes, you should activate `--save_on_each_node`." ) def _issue_warnings_after_load(self, load_result): if len(load_result.missing_keys) != 0: if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set( self.model._keys_to_ignore_on_save ): self.model.tie_weights() else: logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.") if len(load_result.unexpected_keys) != 0: logger.warning( f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}." ) def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval): if self.control.should_log: if is_torch_tpu_available(): xm.mark_step() logs: Dict[str, float] = {} tr_loss_scalar = self._nested_gather(tr_loss).mean().item() tr_loss -= tr_loss logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) logs["learning_rate"] = self._get_learning_rate() self._total_loss_scalar += tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.store_flos() self.log(logs) metrics = None if self.control.should_evaluate: if isinstance(self.eval_dataset, dict): metrics = {} for eval_dataset_name, eval_dataset in self.eval_dataset.items(): dataset_metrics = self.evaluate( eval_dataset=eval_dataset, ignore_keys=ignore_keys_for_eval, metric_key_prefix=f"eval_{eval_dataset_name}", ) metrics.update(dataset_metrics) else: metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) self._report_to_hp_search(trial, self.state.global_step, metrics) if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" self.lr_scheduler.step(metrics[metric_to_check]) if self.control.should_save: self._save_checkpoint(model, trial, metrics=metrics) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _load_rng_state(self, checkpoint): if checkpoint is None: return if self.args.world_size > 1: process_index = self.args.process_index rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {process_index}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." 
) return else: rng_file = os.path.join(checkpoint, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) if is_torch_tpu_available(): xm.set_rng_state(checkpoint_rng_state["xla"]) if is_torch_npu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: torch.npu.random.set_rng_state_all(checkpoint_rng_state["npu"]) else: try: torch.npu.random.set_rng_state(checkpoint_rng_state["npu"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the NPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def _save_checkpoint(self, model, trial, metrics=None): checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" if self.hp_search_backend is None and trial is None: self.store_flos() run_dir = self._get_output_dir(trial=trial) output_dir = os.path.join(run_dir, checkpoint_folder) self.save_model(output_dir, _internal_call=True) if not self.args.save_only_model: self._save_optimizer_and_scheduler(output_dir) self._save_rng_state(output_dir) if metrics is not None and self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" metric_value = metrics[metric_to_check] operator = np.greater if self.args.greater_is_better else np.less if ( self.state.best_metric is None or self.state.best_model_checkpoint is None or operator(metric_value, self.state.best_metric) ): self.state.best_metric = metric_value self.state.best_model_checkpoint = output_dir if self.args.should_save: self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME)) if self.args.push_to_hub: self._push_from_checkpoint(output_dir) if self.args.should_save: self._rotate_checkpoints(use_mtime=True, output_dir=run_dir) def _save_rng_state(self, output_dir): rng_states = { "python": random.getstate(), "numpy": np.random.get_state(), "cpu": torch.random.get_rng_state(), } if torch.cuda.is_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["cuda"] = torch.cuda.random.get_rng_state_all() else: rng_states["cuda"] = torch.cuda.random.get_rng_state() if is_torch_tpu_available(): rng_states["xla"] = xm.get_rng_state() if is_torch_npu_available(): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: rng_states["npu"] = torch.npu.random.get_rng_state_all() else: rng_states["npu"] = torch.npu.random.get_rng_state() os.makedirs(output_dir, exist_ok=True) if self.args.world_size <= 1: torch.save(rng_states, os.path.join(output_dir, "rng_state.pth")) else: torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth")) def _save_optimizer_and_scheduler(self, 
output_dir): if is_torch_tpu_available(): xm.rendezvous("saving_optimizer_states") xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) elif is_sagemaker_mp_enabled(): opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False) smp.barrier() if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state: smp.save( opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME), partial=True, v3=smp.state.cfg.shard_optimizer_state, ) elif self.is_deepspeed_enabled: self.model_wrapped.save_checkpoint(output_dir) elif self.is_fsdp_enabled: save_fsdp_model(self.accelerator.state.fsdp_plugin, self.accelerator, self.model, output_dir) save_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, output_dir ) elif self.args.should_save: torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME)) is_deepspeed_custom_scheduler = self.is_deepspeed_enabled and not isinstance( self.lr_scheduler, DeepSpeedSchedulerWrapper ) if ( self.args.should_save and (not self.is_deepspeed_enabled or is_deepspeed_custom_scheduler) and not is_torch_tpu_available() ): with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME)) reissue_pt_warnings(caught_warnings) def _load_optimizer_and_scheduler(self, checkpoint): if checkpoint is None: return if self.is_deepspeed_enabled: if not isinstance(self.lr_scheduler, DeepSpeedSchedulerWrapper): with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) reissue_pt_warnings(caught_warnings) return checkpoint_file_exists = ( glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") if is_sagemaker_mp_enabled() else ( os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) or os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME_BIN)) or ( os.path.isdir(checkpoint) and any( OPTIMIZER_NAME_BIN.split(".")[0] in folder_name for folder_name in os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name)) ) ) ) ) if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): if is_torch_tpu_available(): optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu") with warnings.catch_warnings(record=True) as caught_warnings: lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu") reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: if is_sagemaker_mp_enabled(): if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): def opt_load_hook(mod, opt): opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) else: def opt_load_hook(mod, opt): if IS_SAGEMAKER_MP_POST_1_10: opt.load_state_dict( smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) ) else: opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) self.model_wrapped.register_post_step_hook(opt_load_hook) else: map_location = self.args.device if self.args.world_size > 
1 else "cpu" if self.is_fsdp_enabled: load_fsdp_optimizer( self.accelerator.state.fsdp_plugin, self.accelerator, self.optimizer, self.model, checkpoint, ) else: self.optimizer.load_state_dict( torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) ) with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) reissue_pt_warnings(caught_warnings) def hyperparameter_search( self, hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: Union[str, List[str]] = "minimize", backend: Optional[Union["str", HPSearchBackend]] = None, hp_name: Optional[Callable[["optuna.Trial"], str]] = None, **kwargs, ) -> Union[BestRun, List[BestRun]]: if backend is None: backend = default_hp_search_backend() backend = HPSearchBackend(backend) backend_obj = ALL_HYPERPARAMETER_SEARCH_BACKENDS[backend]() backend_obj.ensure_available() self.hp_search_backend = backend if self.model_init is None: raise RuntimeError( "To use hyperparameter search, you need to pass your model through a model_init function." ) self.hp_space = backend_obj.default_hp_space if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective best_run = backend_obj.run(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: Dict[str, float]) -> None: if self.state.epoch is not None: logs["epoch"] = round(self.state.epoch, 2) if self.args.include_num_input_tokens_seen: logs["num_input_tokens_seen"] = self.state.num_input_tokens_seen output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: if isinstance(data, Mapping): return type(data)({k: self._prepare_input(v) for k, v in data.items()}) elif isinstance(data, (tuple, list)): return type(data)(self._prepare_input(v) for v in data) elif isinstance(data, torch.Tensor): kwargs = {"device": self.args.device} if self.is_deepspeed_enabled and (torch.is_floating_point(data) or torch.is_complex(data)): kwargs.update({"dtype": self.accelerator.state.deepspeed_plugin.hf_ds_config.dtype()}) return data.to(**kwargs) return data def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: inputs = self._prepare_input(inputs) if len(inputs) == 0: raise ValueError( "The batch received was empty, your model won't be able to train on it. Double-check that your " f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." 
) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past return inputs def compute_loss_context_manager(self): return self.autocast_smart_context_manager() def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): if self.use_cpu_amp: ctx_manager = torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) else: ctx_manager = contextlib.nullcontext() return ctx_manager def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: model.train() inputs = self._prepare_inputs(inputs) if is_sagemaker_mp_enabled(): loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) return loss_mb.reduce_mean().detach().to(self.args.device) with self.compute_loss_context_manager(): loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() if self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: self.accelerator.backward(loss) return loss.detach() / self.args.gradient_accumulation_steps def compute_loss(self, model, inputs, return_outputs=False): if self.label_smoother is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None outputs = model(**inputs) if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: unwrapped_model = unwrap_model(model) if is_peft_available() and isinstance(unwrapped_model, PeftModel): model_name = unwrapped_model.base_model.model._get_name() else: model_name = unwrapped_model._get_name() if model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] return (loss, outputs) if return_outputs else loss def is_local_process_zero(self) -> bool: return self.args.local_process_index == 0 def is_world_process_zero(self) -> bool: if is_sagemaker_mp_enabled(): return smp.rank() == 0 else: return self.args.process_index == 0 def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): if output_dir is None: output_dir = self.args.output_dir if is_torch_tpu_available(): self._save_tpu(output_dir) elif is_sagemaker_mp_enabled(): os.makedirs(output_dir, exist_ok=True) state_dict = self.model_wrapped.state_dict() if self.args.should_save: self._save(output_dir, state_dict=state_dict) if IS_SAGEMAKER_MP_POST_1_10: Path(os.path.join(output_dir, "user_content.pt")).touch() elif self.is_fsdp_enabled: if ("FULL_STATE_DICT" in str(self.accelerator.state.fsdp_plugin.state_dict_type)) and ( version.parse(accelerate_version) > version.parse("0.24.1") ): state_dict = self.accelerator.get_state_dict(self.model) if self.args.should_save: self._save(output_dir, state_dict=state_dict) elif self.is_deepspeed_enabled: try: state_dict = self.accelerator.get_state_dict(self.deepspeed) if self.args.should_save: self._save(output_dir, state_dict=state_dict) except ValueError: logger.warning( " stage3_gather_16bit_weights_on_model_save=false. 
Saving the full checkpoint instead, use" " zero_to_fp32.py to recover weights" ) if self.args.should_save: self._save(output_dir, state_dict={}) remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME]) self.model_wrapped.save_checkpoint(output_dir) elif self.args.should_save: self._save(output_dir) if self.args.push_to_hub and not _internal_call: self.push_to_hub(commit_message="Model save") def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info(f"Saving model checkpoint to {output_dir}") if xm.is_master_ordinal(): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) xm.rendezvous("saving_checkpoint") if not isinstance(self.model, PreTrainedModel): if isinstance(unwrap_model(self.model), PreTrainedModel): unwrap_model(self.model).save_pretrained( output_dir, is_main_process=self.args.should_save, state_dict=self.model.state_dict(), save_function=xm.save, ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = self.model.state_dict() xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save) if self.tokenizer is not None and self.args.should_save: self.tokenizer.save_pretrained(output_dir) def _save(self, output_dir: Optional[str] = None, state_dict=None): output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") supported_classes = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) if not isinstance(self.model, supported_classes): if state_dict is None: state_dict = self.model.state_dict() if isinstance(unwrap_model(self.model), supported_classes): unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) else: logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") if self.args.save_safetensors: safetensors.torch.save_file(state_dict, os.path.join(output_dir, SAFE_WEIGHTS_NAME)) else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, safe_serialization=self.args.save_safetensors ) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def store_flos(self): if self.args.parallel_mode == ParallelMode.DISTRIBUTED: self.state.total_flos += ( distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() ) self.current_flos = 0 else: self.state.total_flos += self.current_flos self.current_flos = 0 def _sorted_checkpoints( self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False ) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) 
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] if ( self.state.best_model_checkpoint is not None and str(Path(self.state.best_model_checkpoint)) in checkpoints_sorted ): best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) for i in range(best_model_index, len(checkpoints_sorted) - 2): checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if len(checkpoints_sorted) <= self.args.save_total_limit: return save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[-1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint, ignore_errors=True) def evaluate( self, eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> Dict[str, float]: self._memory_tracker.start() eval_dataloader = self.get_eval_dataloader(eval_dataset) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop( eval_dataloader, description="Evaluation", prediction_loss_only=True if self.compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, ) total_batch_size = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) ) self.log(output.metrics) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) self._memory_tracker.stop_and_update_metrics(output.metrics) return output.metrics def predict( self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test" ) -> PredictionOutput: self._memory_tracker.start() test_dataloader = self.get_test_dataloader(test_dataset) start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop output = eval_loop( test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix ) total_batch_size = self.args.eval_batch_size * self.args.world_size if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) ) self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) 
self._memory_tracker.stop_and_update_metrics(output.metrics) return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics) def evaluation_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: args = self.args prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only if self.is_deepspeed_enabled and self.deepspeed is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: model = ( self.accelerator.prepare(model) if self.is_deepspeed_enabled else self.accelerator.prepare_model(model, evaluation_mode=True) ) if self.is_fsdp_enabled: self.model = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = self.args.eval_batch_size logger.info(f"***** Running {description} *****") if has_length(dataloader): logger.info(f" Num examples = {self.num_examples(dataloader)}") else: logger.info(" Num examples: Unknown") logger.info(f" Batch size = {batch_size}") model.eval() self.callback_handler.eval_dataloader = dataloader eval_dataset = getattr(dataloader, "dataset", None) if args.past_index >= 0: self._past = None losses_host = None preds_host = None labels_host = None inputs_host = None all_losses = None all_preds = None all_labels = None all_inputs = None observed_num_examples = 0 for step, inputs in enumerate(dataloader): observed_batch_size = find_batch_size(inputs) if observed_batch_size is not None: observed_num_examples += observed_batch_size if batch_size is None: batch_size = observed_batch_size loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, "main_input_name", "input_ids") inputs_decode = self._prepare_input(inputs[main_input_name]) if args.include_inputs_for_metrics else None if is_torch_tpu_available(): xm.mark_step() if loss is not None: losses = self.gather_function((loss.repeat(batch_size))) losses_host = losses if losses_host is None else nested_concat(losses_host, losses, padding_index=-100) if labels is not None: labels = self.accelerator.pad_across_processes(labels, dim=1, pad_index=-100) if inputs_decode is not None: inputs_decode = self.accelerator.pad_across_processes(inputs_decode, dim=1, pad_index=-100) inputs_decode = self.gather_function((inputs_decode)) inputs_host = ( inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) ) if logits is not None: logits = self.accelerator.pad_across_processes(logits, dim=1, pad_index=-100) if self.preprocess_logits_for_metrics is not None: logits = self.preprocess_logits_for_metrics(logits, labels) logits = self.gather_function((logits)) preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels = self.gather_function((labels)) labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) self.control = 
self.callback_handler.on_prediction_step(args, self.state, self.control) if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) if preds_host is not None: logits = nested_numpify(preds_host) all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if inputs_host is not None: inputs_decode = nested_numpify(inputs_host) all_inputs = ( inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) ) if labels_host is not None: labels = nested_numpify(labels_host) all_labels = ( labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) ) losses_host, preds_host, inputs_host, labels_host = None, None, None, None self.gather_function = self.accelerator.gather_for_metrics if args.past_index and hasattr(self, "_past"): delattr(self, "_past") if losses_host is not None: losses = nested_numpify(losses_host) all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) if preds_host is not None: logits = nested_numpify(preds_host) all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if inputs_host is not None: inputs_decode = nested_numpify(inputs_host) all_inputs = ( inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) ) if labels_host is not None: labels = nested_numpify(labels_host) all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) if has_length(eval_dataset): num_samples = len(eval_dataset) elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: num_samples = eval_dataset.num_examples else: if has_length(dataloader): num_samples = self.num_examples(dataloader) else: num_samples = observed_num_examples if num_samples == 0 and observed_num_examples > 0: num_samples = observed_num_examples if self.compute_metrics is not None and all_preds is not None and all_labels is not None: if args.include_inputs_for_metrics: metrics = self.compute_metrics( EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs) ) else: metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels)) else: metrics = {} metrics = denumpify_detensorize(metrics) if all_losses is not None: metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item() if hasattr(self, "jit_compilation_time"): metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) def _nested_gather(self, tensors, name=None): if tensors is None: return if is_torch_tpu_available(): if name is None: name = "nested_gather" tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif (self.args.distributed_state is not None and self.args.distributed_state.distributed_type != "NO") or ( self.args.distributed_state is None and self.args.local_rank != -1 ): tensors = distributed_concat(tensors) return tensors def prediction_step( self, model: nn.Module, inputs: Dict[str, 
Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names) return_loss = inputs.get("return_loss", None) if return_loss is None: return_loss = self.can_return_loss loss_without_labels = True if len(self.label_names) == 0 and return_loss else False inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] if has_labels or loss_without_labels: labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) if len(labels) == 1: labels = labels[0] else: labels = None with torch.no_grad(): if is_sagemaker_mp_enabled(): raw_outputs = smp_forward_only(model, inputs) if has_labels or loss_without_labels: if isinstance(raw_outputs, dict): loss_mb = raw_outputs["loss"] logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) else: loss_mb = raw_outputs[0] logits_mb = raw_outputs[1:] loss = loss_mb.reduce_mean().detach().cpu() logits = smp_nested_concat(logits_mb) else: loss = None if isinstance(raw_outputs, dict): logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) else: logits_mb = raw_outputs logits = smp_nested_concat(logits_mb) else: if has_labels or loss_without_labels: with self.compute_loss_context_manager(): loss, outputs = self.compute_loss(model, inputs, return_outputs=True) loss = loss.mean().detach() if isinstance(outputs, dict): logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) else: logits = outputs[1:] else: loss = None with self.compute_loss_context_manager(): outputs = model(**inputs) if isinstance(outputs, dict): logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) else: logits = outputs if self.args.past_index >= 0: self._past = outputs[self.args.past_index - 1] if prediction_loss_only: return (loss, None, None) logits = nested_detach(logits) if len(logits) == 1: logits = logits[0] return (loss, logits, labels) def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): if hasattr(self.model, "floating_point_ops"): return self.model.floating_point_ops(inputs) else: return 0 def init_hf_repo(self): if not self.is_world_process_zero(): return if self.args.hub_model_id is None: repo_name = Path(self.args.output_dir).absolute().name else: repo_name = self.args.hub_model_id repo_url = create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True) self.hub_model_id = repo_url.repo_id self.push_in_progress = None def create_model_card( self, language: Optional[str] = None, license: Optional[str] = None, tags: Union[str, List[str], None] = None, model_name: Optional[str] = None, finetuned_from: Optional[str] = None, tasks: Union[str, List[str], None] = None, dataset_tags: Union[str, List[str], None] = None, dataset: Union[str, List[str], None] = None, dataset_args: Union[str, List[str], None] = None, ): if not self.is_world_process_zero(): return training_summary = TrainingSummary.from_trainer( self, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, ) model_card = training_summary.to_model_card() with 
open(os.path.join(self.args.output_dir, "README.md"), "w") as f: f.write(model_card) def _push_from_checkpoint(self, checkpoint_folder): if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: return if not self.args.hub_always_push and self.push_in_progress is not None and not self.push_in_progress.is_done(): return output_dir = self.args.output_dir modeling_files = [CONFIG_NAME, WEIGHTS_NAME, SAFE_WEIGHTS_NAME] if is_peft_available(): modeling_files.extend([ADAPTER_CONFIG_NAME, ADAPTER_WEIGHTS_NAME, ADAPTER_SAFE_WEIGHTS_NAME]) for modeling_file in modeling_files: if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)): shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file)) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) if self.args.save_strategy == IntervalStrategy.STEPS: commit_message = f"Training in progress, step {self.state.global_step}" else: commit_message = f"Training in progress, epoch {int(self.state.epoch)}" model_push_job = upload_folder( repo_id=self.hub_model_id, folder_path=output_dir, commit_message=commit_message, token=self.args.hub_token, run_as_future=True, ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], ) push_jobs = [model_push_job] if self.args.hub_strategy in [HubStrategy.CHECKPOINT, HubStrategy.ALL_CHECKPOINTS]: path_in_repo = ( "last-checkpoint" if self.args.hub_strategy == HubStrategy.CHECKPOINT else Path(checkpoint_folder).name ) checkpoint_push = upload_folder( repo_id=self.hub_model_id, folder_path=checkpoint_folder, path_in_repo=path_in_repo, commit_message=commit_message + ", checkpoint", token=self.args.hub_token, run_as_future=True, ) push_jobs.append(checkpoint_push) if self.push_in_progress is None or self.push_in_progress.is_done(): self.push_in_progress = PushInProgress(push_jobs) else: self.push_in_progress.jobs.extend(push_jobs) def _finish_current_push(self): if not hasattr(self, "push_in_progress"): return if self.push_in_progress is not None and not self.push_in_progress.is_done(): logger.info("Waiting for the current checkpoint push to be finished, this might take a couple of minutes.") self.push_in_progress.wait_until_done() def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str: model_name = kwargs.pop("model_name", None) if model_name is None and self.args.should_save: if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = self.args.hub_model_id.split("/")[-1] if self.hub_model_id is None: self.init_hf_repo() self.save_model(_internal_call=True) if not self.is_world_process_zero(): return self.create_model_card(model_name=model_name, **kwargs) self._finish_current_push() return upload_folder( repo_id=self.hub_model_id, folder_path=self.args.output_dir, commit_message=commit_message, token=self.args.hub_token, run_as_future=not blocking, ignore_patterns=["_*", f"{PREFIX_CHECKPOINT_DIR}-*"], ) def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: args = self.args if not has_length(dataloader): raise ValueError("dataloader must implement a working __len__") prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only if self.is_deepspeed_enabled and self.deepspeed 
is None: _, _ = deepspeed_init(self, num_training_steps=0, inference=True) model = self._wrap_model(self.model, training=False, dataloader=dataloader) if len(self.accelerator._models) == 0 and model is self.model: model = ( self.accelerator.prepare(model) if self.is_deepspeed_enabled else self.accelerator.prepare_model(model, evaluation_mode=True) ) if self.is_fsdp_enabled: self.model = model if model is not self.model: self.model_wrapped = model if self.is_deepspeed_enabled: self.deepspeed = self.model_wrapped if not self.is_in_train: if args.fp16_full_eval: model = model.to(dtype=torch.float16, device=args.device) elif args.bf16_full_eval: model = model.to(dtype=torch.bfloat16, device=args.device) batch_size = dataloader.batch_size num_examples = self.num_examples(dataloader) logger.info(f"***** Running {description} *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Batch size = {batch_size}") losses_host: torch.Tensor = None preds_host: Union[torch.Tensor, List[torch.Tensor]] = None labels_host: Union[torch.Tensor, List[torch.Tensor]] = None inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None world_size = max(1, args.world_size) eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) if not prediction_loss_only: make_multiple_of = None if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler): make_multiple_of = dataloader.sampler.batch_size preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of) model.eval() if args.past_index >= 0: self._past = None self.callback_handler.eval_dataloader = dataloader for step, inputs in enumerate(dataloader): loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) main_input_name = getattr(self.model, "main_input_name", "input_ids") inputs_decode = self._prepare_input(inputs[main_input_name]) if args.include_inputs_for_metrics else None if loss is not None: losses = loss.repeat(batch_size) losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) if logits is not None: preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) if inputs_decode is not None: inputs_host = ( inputs_decode if inputs_host is None else nested_concat(inputs_host, inputs_decode, padding_index=-100) ) self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) losses_host, preds_host, labels_host, inputs_host = None, None, None, None if args.past_index and hasattr(self, "_past"): delattr(self, "_past") eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) 
if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids")) eval_loss = eval_losses_gatherer.finalize() preds = preds_gatherer.finalize() if not prediction_loss_only else None label_ids = labels_gatherer.finalize() if not prediction_loss_only else None inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None if self.compute_metrics is not None and preds is not None and label_ids is not None: if args.include_inputs_for_metrics: metrics = self.compute_metrics( EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids) ) else: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics = denumpify_detensorize(metrics) if eval_loss is not None: metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() for key in list(metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) def _gather_and_numpify(self, tensors, name): if tensors is None: return if is_torch_tpu_available(): tensors = nested_xla_mesh_reduce(tensors, name) elif is_sagemaker_mp_enabled(): tensors = smp_gather(tensors) elif self.args.parallel_mode == ParallelMode.DISTRIBUTED: tensors = distributed_concat(tensors) return nested_numpify(tensors) def _add_sm_patterns_to_gitignore(self) -> None: if not self.is_world_process_zero(): return patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f: current_content = f.read() else: current_content = "" content = current_content for pattern in patterns: if pattern not in content: if content.endswith("\n"): content += pattern else: content += f"\n{pattern}" if content != current_content: with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f: logger.debug(f"Writing .gitignore file. 
Content: {content}") f.write(content) self.repo.git_add(".gitignore") time.sleep(0.5) if not self.repo.is_repo_clean(): self.repo.git_commit("Add *.sagemaker patterns to .gitignore.") self.repo.git_push() def create_accelerator_and_postprocess(self): grad_acc_kwargs = {"num_steps": self.args.gradient_accumulation_steps} grad_acc_kwargs["sync_with_dataloader"] = False gradient_accumulation_plugin = GradientAccumulationPlugin(**grad_acc_kwargs) self.accelerator = Accelerator( dispatch_batches=self.args.dispatch_batches, split_batches=self.args.split_batches, deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, ) self.gather_function = self.accelerator.gather_for_metrics self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None if self.is_fsdp_enabled: fsdp_plugin = self.accelerator.state.fsdp_plugin fsdp_plugin.limit_all_gathers = self.args.fsdp_config.get( "limit_all_gathers", fsdp_plugin.limit_all_gathers ) if is_accelerate_available("0.23.0"): fsdp_plugin.activation_checkpointing = self.args.fsdp_config.get( "activation_checkpointing", fsdp_plugin.activation_checkpointing ) if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing: raise ValueError( "The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg " "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic " "when using FSDP." ) if self.is_deepspeed_enabled: if getattr(self.args, "hf_deepspeed_config", None) is None: from transformers.integrations.deepspeed import HfTrainerDeepSpeedConfig ds_plugin = self.accelerator.state.deepspeed_plugin ds_plugin.hf_ds_config = HfTrainerDeepSpeedConfig(ds_plugin.hf_ds_config.config) ds_plugin.deepspeed_config = ds_plugin.hf_ds_config.config ds_plugin.hf_ds_config.trainer_config_process(self.args)
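The training-loop internals above (checkpoint saving and rotation, resuming, best-model loading, evaluation and prediction loops, Hub pushes) are normally exercised through the public `Trainer` API rather than called directly. The sketch below is a minimal, hedged example of how these paths are typically driven from user code; the model checkpoint, dataset, and output directory names are illustrative placeholders and not taken from this file.

from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

dataset = load_dataset("glue", "mrpc")
dataset = dataset.map(
    lambda batch: tokenizer(batch["sentence1"], batch["sentence2"], truncation=True),
    batched=True,
)

args = TrainingArguments(
    output_dir="mrpc-finetune",      # placeholder output directory
    evaluation_strategy="steps",
    eval_steps=500,
    save_steps=500,                  # _save_checkpoint() runs every `save_steps` update steps
    save_total_limit=2,              # older checkpoints are pruned by _rotate_checkpoints()
    load_best_model_at_end=True,     # _load_best_model() restores the best checkpoint after training
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
    tokenizer=tokenizer,
)

# Resuming goes through _load_from_checkpoint(), _load_optimizer_and_scheduler() and _load_rng_state().
# `resume_from_checkpoint=True` assumes a checkpoint already exists under `output_dir`; omit it on the first run.
trainer.train(resume_from_checkpoint=True)

metrics = trainer.evaluate()                     # runs evaluation_loop() / prediction_step()
predictions = trainer.predict(dataset["test"])   # predict() reuses the same loop with a "test" prefix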
Copyright 2020-present the HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Callbacks to use with the Trainer class and customize the training loop.

TrainerState is a class containing the Trainer inner state that will be saved along the model and optimizer when checkpointing and passed to the TrainerCallback. In all this class, one "step" is to be understood as one update step: when using gradient accumulation, one update step may require several forward and backward passes, so if you use gradient_accumulation_steps=n, one update step requires going through n batches.

Args:
    epoch (float, optional): Only set during training; represents the epoch the training is at, the decimal part being the percentage of the current epoch completed.
    global_step (int, optional, defaults to 0): During training, represents the number of update steps completed.
    max_steps (int, optional, defaults to 0): The number of update steps to do during the current training.
    logging_steps (int, optional, defaults to 500): Log every X update steps.
    eval_steps (int, optional): Run an evaluation every X steps.
    save_steps (int, optional, defaults to 500): Save a checkpoint every X update steps.
    num_input_tokens_seen (int, optional, defaults to 0): The number of tokens seen during training (number of input tokens, not the number of prediction tokens).
    total_flos (float, optional, defaults to 0): The total number of floating operations done by the model since the beginning of training (stored as a float to avoid overflow).
    log_history (list of dict of str to float, optional): The list of logs done since the beginning of training.
    best_metric (float, optional): When tracking the best model, the value of the best metric encountered so far.
    best_model_checkpoint (str, optional): When tracking the best model, the name of the checkpoint for the best model encountered so far.
    is_local_process_zero (bool, optional, defaults to True): Whether or not this process is the local (e.g. on one machine, if training in a distributed fashion on several machines) main process.
    is_world_process_zero (bool, optional, defaults to True): Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be True for one process).
    is_hyper_param_search (bool, optional, defaults to False): Whether we are in the process of a hyperparameter search using Trainer.hyperparameter_search. This will impact the way data is logged in TensorBoard.

TrainerState.save_to_json saves the content of this instance in JSON format inside json_path, and TrainerState.load_from_json creates an instance from the content of json_path.
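For illustration, a minimal sketch of saving and reloading a TrainerState; the file path below is an assumption (the Trainer normally writes a trainer_state.json inside each checkpoint directory):

```python
from transformers import TrainerState

# Build a state by hand and round-trip it through JSON.
state = TrainerState(global_step=120, max_steps=1000, logging_steps=50)
state.save_to_json("trainer_state.json")

# The same method can be pointed at an existing checkpoint's trainer_state.json
# to inspect training progress.
restored = TrainerState.load_from_json("trainer_state.json")
print(restored.global_step, restored.best_metric)
```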
TrainerControl is a class that handles the Trainer control flow. It is used by the TrainerCallback to activate some switches in the training loop.

Args:
    should_training_stop (bool, optional, defaults to False): Whether or not the training should be interrupted. If True, this variable will not be set back to False: the training will just stop.
    should_epoch_stop (bool, optional, defaults to False): Whether or not the current epoch should be interrupted. If True, this variable will be set back to False at the beginning of the next epoch.
    should_save (bool, optional, defaults to False): Whether or not the model should be saved at this step. If True, this variable will be set back to False at the beginning of the next step.
    should_evaluate (bool, optional, defaults to False): Whether or not the model should be evaluated at this step. If True, this variable will be set back to False at the beginning of the next step.
    should_log (bool, optional, defaults to False): Whether or not the logs should be reported at this step. If True, this variable will be set back to False at the beginning of the next step.

The internal methods _new_training, _new_epoch and _new_step reset these variables for a new training, a new epoch and a new step, respectively.

TrainerCallback is a class for objects that will inspect the state of the training loop at some events and take some decisions. At each of those events the following arguments are available:

Args:
    args (TrainingArguments): The training arguments used to instantiate the Trainer.
    state (TrainerState): The current state of the Trainer.
    control (TrainerControl): The object that is returned to the Trainer and can be used to make some decisions.
    model (PreTrainedModel or torch.nn.Module): The model being trained.
    tokenizer (PreTrainedTokenizer): The tokenizer used for encoding the data.
    optimizer (torch.optim.Optimizer): The optimizer used for the training steps.
    lr_scheduler (torch.optim.lr_scheduler.LambdaLR): The scheduler used for setting the learning rate.
    train_dataloader (torch.utils.data.DataLoader, optional): The current dataloader used for training.
    eval_dataloader (torch.utils.data.DataLoader, optional): The current dataloader used for evaluation.
    metrics (dict of str to float): The metrics computed by the last evaluation phase; only accessible in the event on_evaluate.
    logs (dict of str to float): The values to log; only accessible in the event on_log.

The control object is the only one that can be changed by the callback, in which case the event that changes it should return the modified version. The arguments args, state and control are positionals for all events; all the others are grouped in kwargs, and you can unpack the ones you need in the signature of the event.
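As a further illustration of returning a modified control, a custom callback could stop training once a wall-clock budget is exceeded. The callback name and budget below are assumptions for the sketch, not part of the library:

```python
import time

from transformers import TrainerCallback


class TimeBudgetCallback(TrainerCallback):
    """Hypothetical callback: stop training after max_seconds of wall-clock time."""

    def __init__(self, max_seconds: float = 3600.0):
        self.max_seconds = max_seconds
        self.start_time = None

    def on_train_begin(self, args, state, control, **kwargs):
        self.start_time = time.time()

    def on_step_end(self, args, state, control, **kwargs):
        # control is the only argument a callback may change; return it when modified.
        if time.time() - self.start_time > self.max_seconds:
            control.should_training_stop = True
        return control
```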
A minimal built-in example is the PrinterCallback:

```python
class PrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        _ = logs.pop("total_flos", None)
        if state.is_local_process_zero:
            print(logs)
```

The events a TrainerCallback can implement are: on_init_end, called at the end of the initialization of the Trainer; on_train_begin and on_train_end, called at the beginning and end of training; on_epoch_begin and on_epoch_end, called at the beginning and end of an epoch; on_step_begin and on_step_end, called at the beginning and end of a training step (if using gradient accumulation, one training step might take several inputs); on_substep_end, called at the end of a substep during gradient accumulation; on_evaluate, called after an evaluation phase; on_predict, called after a successful prediction; on_save, called after a checkpoint save; on_log, called after logging the last logs; and on_prediction_step, called after a prediction step.

CallbackHandler is an internal class that just calls the list of callbacks in order. A callback can skip the return of control if it doesn't change it.

DefaultFlowCallback is a TrainerCallback that handles the default flow of the training loop for logs, evaluation and checkpoints. ProgressCallback is a TrainerCallback that displays the progress of training or evaluation, and PrinterCallback is a bare TrainerCallback that just prints the logs.

EarlyStoppingCallback is a TrainerCallback that handles early stopping.

Args:
    early_stopping_patience (int): Use with metric_for_best_model to stop training when the specified metric worsens for early_stopping_patience evaluation calls.
    early_stopping_threshold (float, optional): Use with the TrainingArguments metric_for_best_model and early_stopping_patience to denote how much the specified metric must improve to satisfy early stopping conditions.

This callback depends on the TrainingArguments argument load_best_model_at_end to set best_metric in TrainerState. Note that if the TrainingArguments argument save_steps differs from eval_steps, the early stopping will not occur until the next save step. The attribute early_stopping_patience_counter denotes the number of times validation metrics failed to improve, and best_metric is set by the load_best_model code.
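To make the control-flag flow concrete, here is an illustrative snippet that drives DefaultFlowCallback by hand; in practice the Trainer does this internally as part of its training loop, and the values below are arbitrary:

```python
from transformers import TrainingArguments
from transformers.trainer_callback import DefaultFlowCallback, TrainerControl, TrainerState

# Illustrative values only.
args = TrainingArguments(
    output_dir="out",
    logging_strategy="steps", logging_steps=10,
    evaluation_strategy="steps", eval_steps=20,
    save_strategy="steps", save_steps=20,
)
state = TrainerState(
    global_step=20, max_steps=100,
    logging_steps=args.logging_steps, eval_steps=args.eval_steps, save_steps=args.save_steps,
)
control = TrainerControl()

# At step 20, logging (20 % 10 == 0), evaluation and saving (20 % 20 == 0) are all triggered.
control = DefaultFlowCallback().on_step_end(args, state, control)
print(control.should_log, control.should_evaluate, control.should_save)  # True True True
```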
```python
import dataclasses
import json
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

import numpy as np
from tqdm.auto import tqdm

from .trainer_utils import IntervalStrategy, has_length
from .training_args import TrainingArguments
from .utils import logging


logger = logging.get_logger(__name__)


@dataclass
class TrainerState:
    epoch: Optional[float] = None
    global_step: int = 0
    max_steps: int = 0
    logging_steps: int = 500
    eval_steps: int = 500
    save_steps: int = 500
    num_train_epochs: int = 0
    num_input_tokens_seen: int = 0
    total_flos: float = 0
    log_history: List[Dict[str, float]] = None
    best_metric: Optional[float] = None
    best_model_checkpoint: Optional[str] = None
    is_local_process_zero: bool = True
    is_world_process_zero: bool = True
    is_hyper_param_search: bool = False
    trial_name: str = None
    trial_params: Dict[str, Union[str, float, int, bool]] = None

    def __post_init__(self):
        if self.log_history is None:
            self.log_history = []

    def save_to_json(self, json_path: str):
        json_string = json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + "\n"
        with open(json_path, "w", encoding="utf-8") as f:
            f.write(json_string)

    @classmethod
    def load_from_json(cls, json_path: str):
        with open(json_path, "r", encoding="utf-8") as f:
            text = f.read()
        return cls(**json.loads(text))


@dataclass
class TrainerControl:
    should_training_stop: bool = False
    should_epoch_stop: bool = False
    should_save: bool = False
    should_evaluate: bool = False
    should_log: bool = False

    def _new_training(self):
        self.should_training_stop = False

    def _new_epoch(self):
        self.should_epoch_stop = False

    def _new_step(self):
        self.should_save = False
        self.should_evaluate = False
        self.should_log = False


class TrainerCallback:
    def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs):
        pass

    def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass

    def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        pass


class CallbackHandler(TrainerCallback):
    def __init__(self, callbacks, model, tokenizer, optimizer, lr_scheduler):
        self.callbacks = []
        for cb in callbacks:
            self.add_callback(cb)
        self.model = model
        self.tokenizer = tokenizer
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.train_dataloader = None
        self.eval_dataloader = None

        if not any(isinstance(cb, DefaultFlowCallback) for cb in self.callbacks):
            logger.warning(
                "The Trainer will not work properly if you don't have a `DefaultFlowCallback` in its callbacks. You\n"
                + "should add one before training with `trainer.add_callback(DefaultFlowCallback). The current list of"
                + "callbacks is\n:"
                + self.callback_list
            )

    def add_callback(self, callback):
        cb = callback() if isinstance(callback, type) else callback
        cb_class = callback if isinstance(callback, type) else callback.__class__
        if cb_class in [c.__class__ for c in self.callbacks]:
            logger.warning(
                f"You are adding a {cb_class} to the callbacks of this Trainer, but there is already one. The current"
                + "list of callbacks is\n:"
                + self.callback_list
            )
        self.callbacks.append(cb)

    def pop_callback(self, callback):
        if isinstance(callback, type):
            for cb in self.callbacks:
                if isinstance(cb, callback):
                    self.callbacks.remove(cb)
                    return cb
        else:
            for cb in self.callbacks:
                if cb == callback:
                    self.callbacks.remove(cb)
                    return cb

    def remove_callback(self, callback):
        if isinstance(callback, type):
            for cb in self.callbacks:
                if isinstance(cb, callback):
                    self.callbacks.remove(cb)
                    return
        else:
            self.callbacks.remove(callback)

    @property
    def callback_list(self):
        return "\n".join(cb.__class__.__name__ for cb in self.callbacks)

    def on_init_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_init_end", args, state, control)

    def on_train_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_training_stop = False
        return self.call_event("on_train_begin", args, state, control)

    def on_train_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_train_end", args, state, control)

    def on_epoch_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_epoch_stop = False
        return self.call_event("on_epoch_begin", args, state, control)

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_epoch_end", args, state, control)

    def on_step_begin(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_log = False
        control.should_evaluate = False
        control.should_save = False
        return self.call_event("on_step_begin", args, state, control)

    def on_substep_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_substep_end", args, state, control)

    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_step_end", args, state, control)

    def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
        control.should_evaluate = False
        return self.call_event("on_evaluate", args, state, control, metrics=metrics)

    def on_predict(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics):
        return self.call_event("on_predict", args, state, control, metrics=metrics)

    def on_save(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        control.should_save = False
        return self.call_event("on_save", args, state, control)

    def on_log(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, logs):
        control.should_log = False
        return self.call_event("on_log", args, state, control, logs=logs)

    def on_prediction_step(self, args: TrainingArguments, state: TrainerState, control: TrainerControl):
        return self.call_event("on_prediction_step", args, state, control)

    def call_event(self, event, args, state, control, **kwargs):
        for callback in self.callbacks:
            result = getattr(callback, event)(
                args,
                state,
                control,
                model=self.model,
                tokenizer=self.tokenizer,
                optimizer=self.optimizer,
                lr_scheduler=self.lr_scheduler,
                train_dataloader=self.train_dataloader,
                eval_dataloader=self.eval_dataloader,
                **kwargs,
            )
            # A Callback can skip the return of `control` if it doesn't change it.
            if result is not None:
                control = result
        return control


class DefaultFlowCallback(TrainerCallback):
    def on_step_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # Log
        if state.global_step == 1 and args.logging_first_step:
            control.should_log = True
        if args.logging_strategy == IntervalStrategy.STEPS and state.global_step % state.logging_steps == 0:
            control.should_log = True

        # Evaluate
        if (
            args.evaluation_strategy == IntervalStrategy.STEPS
            and state.global_step % state.eval_steps == 0
            and args.eval_delay <= state.global_step
        ):
            control.should_evaluate = True

        # Save
        if (
            args.save_strategy == IntervalStrategy.STEPS
            and state.save_steps > 0
            and state.global_step % state.save_steps == 0
        ):
            control.should_save = True

        # End training
        if state.global_step >= state.max_steps:
            control.should_training_stop = True

        return control

    def on_epoch_end(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs):
        # Log
        if args.logging_strategy == IntervalStrategy.EPOCH:
            control.should_log = True

        # Evaluate
        if args.evaluation_strategy == IntervalStrategy.EPOCH and args.eval_delay <= state.epoch:
            control.should_evaluate = True

        # Save
        if args.save_strategy == IntervalStrategy.EPOCH:
            control.should_save = True

        return control


class ProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_bar = None
        self.prediction_bar = None

    def on_train_begin(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            self.training_bar = tqdm(total=state.max_steps, dynamic_ncols=True)
        self.current_step = 0

    def on_step_end(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            self.training_bar.update(state.global_step - self.current_step)
            self.current_step = state.global_step

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if state.is_local_process_zero and has_length(eval_dataloader):
            if self.prediction_bar is None:
                self.prediction_bar = tqdm(
                    total=len(eval_dataloader), leave=self.training_bar is None, dynamic_ncols=True
                )
            self.prediction_bar.update(1)

    def on_evaluate(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            if self.prediction_bar is not None:
                self.prediction_bar.close()
            self.prediction_bar = None

    def on_predict(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            if self.prediction_bar is not None:
                self.prediction_bar.close()
            self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        if state.is_local_process_zero and self.training_bar is not None:
            _ = logs.pop("total_flos", None)
            self.training_bar.write(str(logs))

    def on_train_end(self, args, state, control, **kwargs):
        if state.is_local_process_zero:
            self.training_bar.close()
            self.training_bar = None


class PrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        _ = logs.pop("total_flos", None)
        if state.is_local_process_zero:
            print(logs)


class EarlyStoppingCallback(TrainerCallback):
    def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
        self.early_stopping_patience = early_stopping_patience
        self.early_stopping_threshold = early_stopping_threshold
        # early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
        self.early_stopping_patience_counter = 0

    def check_metric_value(self, args, state, control, metric_value):
        # best_metric is set by code for load_best_model
        operator = np.greater if args.greater_is_better else np.less
        if state.best_metric is None or (
            operator(metric_value, state.best_metric)
            and abs(metric_value - state.best_metric) > self.early_stopping_threshold
        ):
            self.early_stopping_patience_counter = 0
        else:
            self.early_stopping_patience_counter += 1

    def on_train_begin(self, args, state, control, **kwargs):
        assert args.load_best_model_at_end, "EarlyStoppingCallback requires load_best_model_at_end = True"
        assert (
            args.metric_for_best_model is not None
        ), "EarlyStoppingCallback requires metric_for_best_model is defined"
        assert (
            args.evaluation_strategy != IntervalStrategy.NO
        ), "EarlyStoppingCallback requires IntervalStrategy of steps or epoch"

    def on_evaluate(self, args, state, control, metrics, **kwargs):
        metric_to_check = args.metric_for_best_model
        if not metric_to_check.startswith("eval_"):
            metric_to_check = f"eval_{metric_to_check}"
        metric_value = metrics.get(metric_to_check)

        if metric_value is None:
            logger.warning(
                f"early stopping required metric_for_best_model, but did not find {metric_to_check} so early stopping"
                " is disabled"
            )
            return

        self.check_metric_value(args, state, control, metric_value)
        if self.early_stopping_patience_counter >= self.early_stopping_patience:
            control.should_training_stop = True
```
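A hedged, minimal end-to-end sketch of registering these callbacks with a Trainer. The checkpoint name and the toy data are assumptions chosen only to keep the example small; any model and dataset accepted by the Trainer would do:

```python
from datasets import Dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    EarlyStoppingCallback,
    PrinterCallback,
    Trainer,
    TrainingArguments,
)

# Tiny illustrative dataset; "prajjwal1/bert-tiny" is just a small public checkpoint.
tokenizer = AutoTokenizer.from_pretrained("prajjwal1/bert-tiny")
texts = ["good", "bad", "great", "awful"] * 8
labels = [1, 0, 1, 0] * 8
ds = Dataset.from_dict(dict(tokenizer(texts, truncation=True, padding=True)))
ds = ds.add_column("label", labels)

model = AutoModelForSequenceClassification.from_pretrained("prajjwal1/bert-tiny", num_labels=2)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="out",
        num_train_epochs=5,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        load_best_model_at_end=True,        # required by EarlyStoppingCallback
        metric_for_best_model="eval_loss",
        greater_is_better=False,
        report_to="none",
    ),
    train_dataset=ds,
    eval_dataset=ds,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=2)],
)
trainer.add_callback(PrinterCallback)  # callbacks can also be added (or popped/removed) after construction
trainer.train()
```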
Copyright 2020-present the HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Torch utilities for the Trainer class.

A filter is used to suppress an undesired warning emitted by PyTorch versions 1.4.2 to 1.7.0, and a helper reissues warnings that are not the save_state_warning.

torch_pad_and_concatenate concatenates tensor1 and tensor2 on the first axis, applying padding on the second if necessary: if either tensor is 1-dimensional or both have the same second dimension, they are simply concatenated on dim 0; otherwise a result tensor of shape (len(tensor1) + len(tensor2), max of the two sequence lengths, ...) is created, filled with the padding index, and both tensors are copied into it. numpy_pad_and_concatenate does the same for NumPy arrays. nested_concat concatenates new_tensors to tensors on the first dim and pads them on the second if needed; it works for tensors or nested lists/tuples/dicts of tensors, and a companion helper finds the first dimension of a tensor in such a nested structure. As of NumPy 1.21.4, NumPy does not support bfloat16 (see https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst); until NumPy adds bfloat16, such tensors must be converted to float32. Other helpers truncate the dummy elements added by SequentialDistributedSampler, and a decorator makes all processes in distributed training wait for each local_master to do something (args: local_rank (int), the rank of the local process).

DistributedSamplerWithLoop is like torch.utils.data.distributed.DistributedSampler but loops at the end back to the beginning of the shuffled samples to make each process have a round multiple of batch_size samples.

Args:
    dataset (torch.utils.data.Dataset): Dataset used for sampling.
    batch_size (int): The batch size used with this sampler.
    kwargs (dict of str to Any, optional): All other keyword arguments passed to DistributedSampler.

DistributedSampler already adds samples from the beginning to make the number of samples a round multiple of the world size, so those are skipped.

SequentialDistributedSampler is a distributed sampler that subsamples indices sequentially, making it easier to collate all results at the end. Even though it is only used for eval and predict (no training), which means that the model params won't have to be synced (i.e. will not hang for synchronization even with a varied number of forward passes), extra samples are still added to the sampler to make it evenly divisible (like in DistributedSampler), to make it easy to gather or reduce the resulting tensors at the end of the loop; it can also add extra samples to make num_samples a multiple of batch_size if passed.

nested_new_like creates the same nested structure as arrays with a first dimension always at num_samples, and expand_like expands an array so that its second dimension grows to new_seq_length, using padding_index for padding.
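To illustrate the padding behaviour described above, here is a minimal standalone re-implementation of the pad-and-concatenate idea (the library helper itself is private, so this sketch avoids depending on it):

```python
import torch


def pad_and_concatenate(t1: torch.Tensor, t2: torch.Tensor, padding_index: int = -100) -> torch.Tensor:
    # Illustrative re-implementation: concatenate on dim 0, padding dim 1 to the longer length.
    if len(t1.shape) == 1 or t1.shape[1] == t2.shape[1]:
        return torch.cat((t1, t2), dim=0)
    new_shape = (t1.shape[0] + t2.shape[0], max(t1.shape[1], t2.shape[1])) + t1.shape[2:]
    result = t1.new_full(new_shape, padding_index)
    result[: t1.shape[0], : t1.shape[1]] = t1
    result[t1.shape[0] :, : t2.shape[1]] = t2
    return result


a = torch.ones(2, 3)   # batch of 2, sequence length 3
b = torch.zeros(4, 5)  # batch of 4, sequence length 5
out = pad_and_concatenate(a, b)
print(out.shape)       # torch.Size([6, 5]); the shorter rows are padded with -100
```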
DistributedTensorGatherer is a class responsible for properly gathering tensors (or nested lists/tuples of tensors) on the CPU by chunks. If our dataset has 16 samples with a batch size of 2 on 3 processes, and we gather then transfer on CPU at every step, our sampler will generate the following indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1] to get something of a size that is a multiple of 3 (so that each process gets the same dataset length). Then processes 0, 1 and 2 will be responsible for making predictions for the following samples: P0: [0, 1, 2, 3, 4, 5], P1: [6, 7, 8, 9, 10, 11], P2: [12, 13, 14, 15, 0, 1]. The first batch treated on each process will be P0: [0, 1], P1: [6, 7], P2: [12, 13], so if we gather at the end of the first batch, we will get a tensor (or nested list/tuple of tensors) corresponding to the indices [0, 1, 6, 7, 12, 13]. If we directly concatenate our results without taking any precautions, the user will get the predictions in this order at the end of the prediction loop: [0, 1, 6, 7, 12, 13, 2, 3, 8, 9, 14, 15, 4, 5, 10, 11, 0, 1]. For some reason, that's not going to roll their boat. This class is there to solve that problem.

Args:
    world_size (int): The number of processes used in the distributed training.
    num_samples (int): The number of samples in our dataset.
    make_multiple_of (int, optional): If passed, the class assumes the datasets passed to each process are made to be a multiple of this argument (by adding samples).
    padding_index (int, optional, defaults to -100): The padding index to use if the arrays don't all have the same sequence length.

Its add_arrays method adds arrays to the internal storage; it initializes the storage to the full size at the first arrays passed, so that if we're bound to get an OOM it happens at the beginning, and it expands the storage on the fly if needed. Its finalize method returns the properly gathered arrays and truncates to the number of samples (since the sampler added some extras to get each process a dataset of the same length).
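A minimal single-process sketch of the add_arrays/finalize flow described above; in real use the Trainer drives this from its distributed evaluation loop, and the batch contents here are arbitrary:

```python
import numpy as np
from transformers.trainer_pt_utils import DistributedTensorGatherer

# Single-process illustration (world_size=1): gather predictions chunk by chunk, then finalize.
num_samples = 10
gatherer = DistributedTensorGatherer(world_size=1, num_samples=num_samples)

for start in range(0, num_samples, 4):
    batch_preds = np.random.rand(min(4, num_samples - start), 3)  # e.g. logits for 3 classes
    gatherer.add_arrays(batch_preds)

all_preds = gatherer.finalize()
print(all_preds.shape)  # (10, 3)
```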
LabelSmoother adds label smoothing on a pre-computed output from a Transformers model.

Args:
    epsilon (float, optional, defaults to 0.1): The label smoothing factor.
    ignore_index (int, optional, defaults to -100): The index in the labels to ignore when computing the loss.

Since the ignore_index is -100, the gather would fail, so those labels are replaced by 0 and the padding_mask ignores them in any case. It works for fp16 input tensors too, by internally upcasting them to fp32. The loss takes the mean over the label dimensions, then divides by the number of active elements (i.e. not padded).

get_length_grouped_indices returns a list of indices so that each slice of batch_size consecutive indices corresponds to elements of similar lengths. To do this, the indices are randomly permuted, grouped in mega-batches of size mega_batch_mult * batch_size, and sorted by length in each mega-batch. The result is the concatenation of all mega-batches, with the batch of batch_size containing the element of maximum length placed first, so that an OOM happens sooner rather than later. The default for mega_batch_mult is 50, or the number needed to get 4 megabatches, whichever is smaller (just in case, for tiny datasets). Torch is used for the random part, as a distributed sampler will set the random seed for torch; the rest of the logic puts the biggest batch first: since each megabatch is sorted by descending length, the longest element is the first, and a switch puts the megabatch containing the longest element in first position overall.

LengthGroupedSampler is a sampler that samples indices in a way that groups together features of the dataset of roughly the same length while keeping a bit of randomness. If neither a dataset nor lengths is provided, it raises a ValueError. When lengths is not given, it can only automatically infer lengths for datasets whose items are dictionaries (or BatchEncoding) with a model_input_name key (input_ids by default), and a lengths given as a torch tensor is converted to a list of ints (with a logged note that this will be slow).

DistributedLengthGroupedSampler is the distributed counterpart, copied and adapted from PyTorch's DistributedSampler: if the dataset length is evenly divisible by the number of replicas, there is no need to drop any data, since the dataset will be split equally; otherwise it splits to the nearest available length that is evenly divisible, to ensure each rank receives the same amount of data. It deterministically shuffles based on epoch and seed, adds extra samples (or removes the tail of the data) to make the total evenly divisible, and then subsamples.
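For illustration, a short sketch of length grouping on synthetic lengths (in the Trainer this sampler is enabled with group_by_length=True in TrainingArguments; the numbers below are arbitrary):

```python
import torch
from transformers.trainer_pt_utils import LengthGroupedSampler

# Illustrative only: group 100 sequences of random lengths into batches of similar length.
lengths = torch.randint(5, 50, (100,)).tolist()
sampler = LengthGroupedSampler(batch_size=8, lengths=lengths)

indices = list(sampler)
first_batch = indices[:8]
print([lengths[i] for i in first_batch])  # lengths within a batch are close to each other
```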
ShardSampler is a sampler that shards batches between several processes. It dispatches indices batch by batch: on 2 processes with batch size 4, the first two batches are [0, 1, 2, 3, 4, 5, 6, 7] and [8, 9, 10, 11, 12, 13, 14, 15], which shard into [0, 1, 2, 3] and [8, 9, 10, 11] for GPU 0 and [4, 5, 6, 7] and [12, 13, 14, 15] for GPU 1. The sampler thus yields [0, 1, 2, 3, 8, 9, 10, 11] on GPU 0 and [4, 5, 6, 7, 12, 13, 14, 15] on GPU 1. It adds extra samples to make the total evenly divisible; a while loop covers the edge case of a tiny dataset where this needs to be done several times. Each shard only sees a fraction of total_num_samples.

IterableDatasetShard wraps a PyTorch IterableDataset to generate samples for one of the processes only. Instances of this class will always yield a number of samples that is a round multiple of the actual batch size (which is batch_size x num_processes). Depending on the value of the drop_last attribute, it will either stop the iteration at the first batch that would be too small, or loop with indices from the beginning. On two processes with an iterable dataset yielding [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] and a batch size of 2, the shard on process 0 will yield [0, 1, 4, 5, 8, 9], so it will see batches [0, 1], [4, 5], [8, 9], and the shard on process 1 will yield [2, 3, 6, 7, 10, 11], so it will see batches [2, 3], [6, 7], [10, 11].

If your IterableDataset implements some randomization that needs to be applied the same way on all processes (for instance, a shuffling), you should use a torch.Generator in a generator attribute of the dataset to generate your random numbers, and call the IterableDatasetShard set_epoch method of this object. It will set the seed of this generator to seed + epoch on all processes before starting the iteration. Alternatively, you can also implement a set_epoch method in your iterable dataset to deal with this.

Args:
    dataset (torch.utils.data.IterableDataset): The batch sampler to split in several shards.
    batch_size (int, optional, defaults to 1): The size of the batches per shard.
    drop_last (bool, optional, defaults to False): Whether or not to drop the last incomplete batch, or complete the last batches by using the samples from the beginning.
    num_processes (int, optional, defaults to 1): The number of processes running concurrently.
    process_index (int, optional, defaults to 0): The index of the current process.
    seed (int, optional, defaults to 0): A random seed that will be used for the random number generation in set_epoch.

During iteration, it waits to have a full batch before yielding elements; when the underlying iterator is finished, it stops if drop_last is True, otherwise it completes the last batch with elements from the beginning. Its length will raise an error if the underlying dataset is not sized.
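A small sketch reproducing the two-process example from the description; the RangeDataset class is a hypothetical toy dataset, and both shards are created in the same process purely for illustration:

```python
from torch.utils.data import IterableDataset

from transformers.trainer_pt_utils import IterableDatasetShard


class RangeDataset(IterableDataset):
    # Tiny illustrative iterable dataset yielding 0..11.
    def __iter__(self):
        return iter(range(12))


# Simulate 2 processes with a per-device batch size of 2.
shard0 = IterableDatasetShard(RangeDataset(), batch_size=2, num_processes=2, process_index=0)
shard1 = IterableDatasetShard(RangeDataset(), batch_size=2, num_processes=2, process_index=1)
print(list(shard0))  # [0, 1, 4, 5, 8, 9]
print(list(shard1))  # [2, 3, 6, 7, 10, 11]
```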
In order to keep trainer.py compact and easy to understand, secondary PT Trainer helper methods are placed in this module. With DeepSpeed's fp16 and dynamic loss scale enabled, the optimizer/scheduler steps may not run for the first few dozen steps while the loss scale is too large, so get_last_lr would fail if called during that warm-up stage; this is worked around. Another helper converts seconds to hh:mm:ss.msec (msecs rounded to 2 decimals).

metrics_format reformats Trainer metric values to a human-readable format.

Args:
    metrics (dict of str to float): The metrics returned from train/evaluate/predict.
Returns:
    metrics (dict of str to float): The reformatted metrics.

log_metrics logs metrics in a specially formatted way; under a distributed environment this is done only for the process with rank 0.

Args:
    split (str): Mode/split name: one of train, eval, test.
    metrics (dict of str to float): The metrics returned from train/evaluate/predict.

Notes on memory reports: in order to get a memory usage report you need to install psutil (pip install psutil). When this method is run you will see a report that includes, for example:

    init_mem_cpu_alloc_delta   = 1301MB
    init_mem_cpu_peaked_delta  = 154MB
    init_mem_gpu_alloc_delta   = 230MB
    init_mem_gpu_peaked_delta  = 0MB
    train_mem_cpu_alloc_delta  = 1345MB
    train_mem_cpu_peaked_delta = 0MB
    train_mem_gpu_alloc_delta  = 693MB
    train_mem_gpu_peaked_delta = 7MB

Understanding the reports: the first segment, e.g. train__, tells you which stage the metrics are for; reports starting with init_ are added to the first stage that gets run, so that if only evaluation is run, the memory usage for __init__ is reported along with the eval_ metrics. The third segment is either cpu or gpu and tells you whether it's general RAM or gpu0 memory. _alloc_delta is the difference in the used/allocated memory counter between the end and the start of the stage; it can be negative if a function released more memory than it allocated. _peaked_delta is any extra memory that was consumed and then freed, relative to the current allocated memory counter; it is never negative. When you look at the metrics of any stage, you add up alloc_delta + peaked_delta to know how much memory was needed to complete that stage.

The reporting happens only for the process of rank 0 and gpu 0 (if there is a GPU). Typically this is enough, since the main process does the bulk of the work, but it could be not quite so if model parallelism is used, where other GPUs may use a different amount of GPU memory. This is also not the same under DataParallel, where gpu0 may require much more memory than the rest, since it stores the gradient and optimizer states for all participating GPUs. Perhaps in the future these reports will evolve to measure those too.

The CPU RAM metric measures RSS (Resident Set Size), which includes both the memory unique to the process and the memory shared with other processes; it does not include swapped-out memory, so the reports could be imprecise. The CPU peak memory is measured using a sampling thread; due to Python's GIL it may miss some of the peak memory if that thread didn't get a chance to run when the highest memory was used, so this report can be less than reality. Using tracemalloc would have reported the exact peak memory, but it doesn't report memory allocations outside of Python, so if some C++ CUDA extension allocated its own memory it wouldn't be reported; it was therefore dropped in favor of the memory sampling approach, which reads the current process memory usage.

The GPU allocated and peak memory reporting is done with torch.cuda.memory_allocated() and torch.cuda.max_memory_allocated(). This metric reports only deltas for PyTorch-specific allocations, as the torch.cuda memory management system doesn't track any memory allocated outside of PyTorch; for example, the very first CUDA call typically loads CUDA kernels, which may take from 0.5 to 2GB of GPU memory. Note that this tracker doesn't account for memory allocations outside of the Trainer's __init__, train, evaluate and predict calls. Because evaluation calls may happen during train, nested invocations can't be handled, since torch.cuda.max_memory_allocated is a single counter: if it gets reset by a nested eval call, train's tracker will report incorrect info. If this PyTorch issue (https://github.com/pytorch/pytorch/issues/16266) gets resolved, it will be possible to change this class to be re-entrant; until then, only the outer level of the train, evaluate and predict methods is tracked, which means that if eval is called during train, it's the latter that will account for its memory usage and that of the former. This also means that if any other tool used alongside the Trainer calls torch.cuda.reset_peak_memory_stats, the GPU peak memory stats could be invalid, and the Trainer will disrupt the normal behavior of any such tools that rely on calling torch.cuda.reset_peak_memory_stats themselves. For best performance, you may want to consider turning the memory profiling off for production runs.
train eval test all metrics dict str float the metrics returned from train evaluate predict combined bool optional defaults to true creates combined metrics by updating all_results json with metrics of this call to understand the metrics please read the docstring of trainer log_metrics the only difference is that raw unformatted numbers are saved in the current method saves the trainer state since trainer save_model saves only the tokenizer with the model under distributed environment this is done only for a process with rank 0 calculate model s total param count if trainable_only is true then count only those requiring grads returns the names of the model parameters that are not inside a forbidden layer add model specific parameters defined with nn parameter since they are not in any child gets a class from a module by its name args module torch nn module the module to get the class from name str the name of the class it doesn t seem possible to check here if tensor is a stepoutput because stepoutput lives in smp step which is also the name of the decorator so python is confused
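The sharding behaviour of IterableDatasetShard described above can be checked with a tiny standalone sketch. Everything below is illustrative only and not part of the original module: ToyIterable is a made-up twelve-element iterable dataset, and the two shards reproduce the per-process outputs listed in the docstring (process 0 sees 0 1 4 5 8 9, process 1 sees 2 3 6 7 10 11).

from torch.utils.data import IterableDataset
from transformers.trainer_pt_utils import IterableDatasetShard

class ToyIterable(IterableDataset):
    # Hypothetical helper for the example: yields 0..11 in order.
    def __iter__(self):
        return iter(range(12))

dataset = ToyIterable()
# Per-process batch size 2 on 2 processes -> a "real" batch of 4 elements is buffered each step.
shard0 = IterableDatasetShard(dataset, batch_size=2, num_processes=2, process_index=0)
shard1 = IterableDatasetShard(dataset, batch_size=2, num_processes=2, process_index=1)
print(list(shard0))  # expected: [0, 1, 4, 5, 8, 9]
print(list(shard1))  # expected: [2, 3, 6, 7, 10, 11]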
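The alloc_delta / peaked_delta arithmetic in the memory report above can also be reproduced by hand with the torch.cuda counters it relies on. The sketch below is a simplified illustration, not the Trainer's actual tracker (which additionally samples CPU RSS with psutil in a background thread): it measures one arbitrary "stage" and derives the two GPU deltas whose sum approximates the memory that stage needed.

import torch

def gpu_mem_deltas(stage_fn):
    # Illustrative only: mirror the report's definitions for a single stage.
    torch.cuda.reset_peak_memory_stats()
    mem_before = torch.cuda.memory_allocated()
    stage_fn()
    mem_after = torch.cuda.memory_allocated()
    mem_peak = torch.cuda.max_memory_allocated()
    alloc_delta = mem_after - mem_before         # may be negative if the stage freed memory
    peaked_delta = max(mem_peak - mem_after, 0)  # extra memory consumed then released, never negative
    return alloc_delta, peaked_delta

if torch.cuda.is_available():
    alloc, peaked = gpu_mem_deltas(
        lambda: torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    )
    # alloc_delta + peaked_delta ~= memory needed to complete the stage
    print(f"alloc_delta={alloc >> 20}MB peaked_delta={peaked >> 20}MB")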
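Downstream of all of this, a typical training script uses the metric helpers in the order below. This is a usage sketch only; trainer stands for any already-configured transformers.Trainer, and the split names follow the documented convention.

# Hypothetical usage; `trainer` is assumed to be a configured transformers.Trainer.
train_result = trainer.train()
trainer.log_metrics("train", train_result.metrics)   # prints "***** train metrics *****" on rank 0 only
trainer.save_metrics("train", train_result.metrics)  # writes train_results.json and updates all_results.json
trainer.save_state()                                 # writes trainer_state.json

eval_metrics = trainer.evaluate()
trainer.log_metrics("eval", eval_metrics)
trainer.save_metrics("eval", eval_metrics)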
import datetime import json import math import os import sys import warnings from collections.abc import Mapping from contextlib import contextmanager from dataclasses import dataclass from logging import StreamHandler from typing import Any, Dict, Iterator, List, Optional, Union import numpy as np import torch import torch.distributed as dist from torch import nn from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler from torch.utils.data.distributed import DistributedSampler from .integrations.deepspeed import is_deepspeed_zero3_enabled from .tokenization_utils_base import BatchEncoding from .utils import is_sagemaker_mp_enabled, is_torch_tpu_available, is_training_run_on_sagemaker, logging if is_training_run_on_sagemaker(): logging.add_handler(StreamHandler(sys.stdout)) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm try: from torch.optim.lr_scheduler import SAVE_STATE_WARNING except ImportError: SAVE_STATE_WARNING = "" logger = logging.get_logger(__name__) def get_dataloader_sampler(dataloader): if hasattr(dataloader, "batch_sampler") and dataloader.batch_sampler is not None: return get_dataloader_sampler(dataloader.batch_sampler) elif hasattr(dataloader, "sampler"): return dataloader.sampler def atleast_1d(tensor_or_array: Union[torch.Tensor, np.ndarray]): if isinstance(tensor_or_array, torch.Tensor): if hasattr(torch, "atleast_1d"): tensor_or_array = torch.atleast_1d(tensor_or_array) elif tensor_or_array.ndim < 1: tensor_or_array = tensor_or_array[None] else: tensor_or_array = np.atleast_1d(tensor_or_array) return tensor_or_array def torch_pad_and_concatenate(tensor1, tensor2, padding_index=-100): tensor1 = atleast_1d(tensor1) tensor2 = atleast_1d(tensor2) if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]: return torch.cat((tensor1, tensor2), dim=0) new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tensor1.shape[2:] result = tensor1.new_full(new_shape, padding_index) result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1 result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2 return result def numpy_pad_and_concatenate(array1, array2, padding_index=-100): array1 = atleast_1d(array1) array2 = atleast_1d(array2) if len(array1.shape) == 1 or array1.shape[1] == array2.shape[1]: return np.concatenate((array1, array2), axis=0) new_shape = (array1.shape[0] + array2.shape[0], max(array1.shape[1], array2.shape[1])) + array1.shape[2:] result = np.full_like(array1, padding_index, shape=new_shape) result[: array1.shape[0], : array1.shape[1]] = array1 result[array1.shape[0] :, : array2.shape[1]] = array2 return result def nested_concat(tensors, new_tensors, padding_index=-100): assert type(tensors) == type( new_tensors ), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}." 
if isinstance(tensors, (list, tuple)): return type(tensors)(nested_concat(t, n, padding_index=padding_index) for t, n in zip(tensors, new_tensors)) elif isinstance(tensors, torch.Tensor): return torch_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index) elif isinstance(tensors, Mapping): return type(tensors)( {k: nested_concat(t, new_tensors[k], padding_index=padding_index) for k, t in tensors.items()} ) elif isinstance(tensors, np.ndarray): return numpy_pad_and_concatenate(tensors, new_tensors, padding_index=padding_index) else: raise TypeError(f"Unsupported type for concatenation: got {type(tensors)}") def find_batch_size(tensors): if isinstance(tensors, (list, tuple)): for t in tensors: result = find_batch_size(t) if result is not None: return result elif isinstance(tensors, Mapping): for key, value in tensors.items(): result = find_batch_size(value) if result is not None: return result elif isinstance(tensors, torch.Tensor): return tensors.shape[0] if len(tensors.shape) >= 1 else None elif isinstance(tensors, np.ndarray): return tensors.shape[0] if len(tensors.shape) >= 1 else None def nested_numpify(tensors): "Numpify `tensors` (even if it's a nested list/tuple/dict of tensors)." if isinstance(tensors, (list, tuple)): return type(tensors)(nested_numpify(t) for t in tensors) if isinstance(tensors, Mapping): return type(tensors)({k: nested_numpify(t) for k, t in tensors.items()}) t = tensors.cpu() if t.dtype == torch.bfloat16: t = t.to(torch.float32) return t.numpy() def nested_detach(tensors): "Detach `tensors` (even if it's a nested list/tuple/dict of tensors)." if isinstance(tensors, (list, tuple)): return type(tensors)(nested_detach(t) for t in tensors) elif isinstance(tensors, Mapping): return type(tensors)({k: nested_detach(t) for k, t in tensors.items()}) return tensors.detach() def nested_xla_mesh_reduce(tensors, name): if is_torch_tpu_available(): import torch_xla.core.xla_model as xm if isinstance(tensors, (list, tuple)): return type(tensors)(nested_xla_mesh_reduce(t, f"{name}_{i}") for i, t in enumerate(tensors)) if isinstance(tensors, Mapping): return type(tensors)( {k: nested_xla_mesh_reduce(t, f"{name}_{i}") for i, (k, t) in enumerate(tensors.items())} ) tensors = atleast_1d(tensors) return xm.mesh_reduce(name, tensors, torch.cat) else: raise ImportError("Torch xla must be installed to use `nested_xla_mesh_reduce`") def distributed_concat(tensor: Any, num_total_examples: Optional[int] = None) -> Any: try: if isinstance(tensor, (tuple, list)): return type(tensor)(distributed_concat(t, num_total_examples) for t in tensor) if isinstance(tensor, Mapping): return type(tensor)({k: distributed_concat(t, num_total_examples) for k, t in tensor.items()}) tensor = atleast_1d(tensor).contiguous() output_tensors = [tensor.clone() for _ in range(dist.get_world_size())] dist.all_gather(output_tensors, tensor) concat = torch.cat(output_tensors, dim=0) if num_total_examples is not None: concat = concat[:num_total_examples] return concat except AssertionError: raise AssertionError("Not currently using distributed training") def distributed_broadcast_scalars( scalars: List[Union[int, float]], num_total_examples: Optional[int] = None, device: Optional[torch.device] = torch.device("cuda"), ) -> torch.Tensor: try: tensorized_scalar = torch.tensor(scalars).to(device) output_tensors = [tensorized_scalar.clone() for _ in range(dist.get_world_size())] dist.all_gather(output_tensors, tensorized_scalar) concat = torch.cat(output_tensors, dim=0) if num_total_examples is not None: 
concat = concat[:num_total_examples] return concat except AssertionError: raise AssertionError("Not currently using distributed training") def reissue_pt_warnings(caught_warnings): if len(caught_warnings) > 1: for w in caught_warnings: if w.category != UserWarning or w.message != SAVE_STATE_WARNING: warnings.warn(w.message, w.category) @contextmanager def torch_distributed_zero_first(local_rank: int): if local_rank not in [-1, 0]: dist.barrier() yield if local_rank == 0: dist.barrier() class DistributedSamplerWithLoop(DistributedSampler): def __init__(self, dataset, batch_size, **kwargs): super().__init__(dataset, **kwargs) self.batch_size = batch_size def __iter__(self): indices = list(super().__iter__()) remainder = 0 if len(indices) % self.batch_size == 0 else self.batch_size - len(indices) % self.batch_size start_remainder = 1 if self.rank < len(self.dataset) % self.num_replicas else 0 indices += indices[start_remainder : start_remainder + remainder] return iter(indices) class SequentialDistributedSampler(Sampler): def __init__(self, dataset, num_replicas=None, rank=None, batch_size=None): warnings.warn( "SequentialDistributedSampler is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank num_samples = len(self.dataset) if batch_size is not None: self.num_samples = int(math.ceil(num_samples / (batch_size * num_replicas))) * batch_size else: self.num_samples = int(math.ceil(num_samples / num_replicas)) self.total_size = self.num_samples * self.num_replicas self.batch_size = batch_size def __iter__(self): indices = list(range(len(self.dataset))) indices += indices[: (self.total_size - len(indices))] assert ( len(indices) == self.total_size ), f"Indices length {len(indices)} and total size {self.total_size} mismatched" indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] assert ( len(indices) == self.num_samples ), f"Indices length {len(indices)} and sample number {self.num_samples} mismatched" return iter(indices) def __len__(self): return self.num_samples def get_tpu_sampler(dataset: torch.utils.data.Dataset, batch_size: int): if xm.xrt_world_size() <= 1: return RandomSampler(dataset) return DistributedSampler(dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()) def nested_new_like(arrays, num_samples, padding_index=-100): if isinstance(arrays, (list, tuple)): return type(arrays)(nested_new_like(x, num_samples) for x in arrays) return np.full_like(arrays, padding_index, shape=(num_samples, *arrays.shape[1:])) def expand_like(arrays, new_seq_length, padding_index=-100): result = np.full_like(arrays, padding_index, shape=(arrays.shape[0], new_seq_length) + arrays.shape[2:]) result[:, : arrays.shape[1]] = arrays return result def nested_truncate(tensors, limit): "Truncate `tensors` at `limit` (even if it's a nested list/tuple/dict of tensors)." 
if isinstance(tensors, (list, tuple)): return type(tensors)(nested_truncate(t, limit) for t in tensors) if isinstance(tensors, Mapping): return type(tensors)({k: nested_truncate(t, limit) for k, t in tensors.items()}) return tensors[:limit] class DistributedTensorGatherer: def __init__(self, world_size, num_samples, make_multiple_of=None, padding_index=-100): warnings.warn( "DistributedTensorGatherer is deprecated and will be removed in v5 of Transformers.", FutureWarning, ) self.world_size = world_size self.num_samples = num_samples total_size = world_size if make_multiple_of is None else world_size * make_multiple_of self.total_samples = int(np.ceil(num_samples / total_size)) * total_size self.process_length = self.total_samples // world_size self._storage = None self._offsets = None self.padding_index = padding_index def add_arrays(self, arrays): if arrays is None: return if self._storage is None: self._storage = nested_new_like(arrays, self.total_samples, padding_index=self.padding_index) self._offsets = list(range(0, self.total_samples, self.process_length)) slice_len, self._storage = self._nested_set_tensors(self._storage, arrays) for i in range(self.world_size): self._offsets[i] += slice_len def _nested_set_tensors(self, storage, arrays): if isinstance(arrays, (list, tuple)): result = [self._nested_set_tensors(x, y) for x, y in zip(storage, arrays)] return result[0][0], type(arrays)(r[1] for r in result) assert ( arrays.shape[0] % self.world_size == 0 ), f"Arrays passed should all have a first dimension multiple of {self.world_size}, found {arrays.shape[0]}." slice_len = arrays.shape[0] // self.world_size for i in range(self.world_size): if len(arrays.shape) == 1: storage[self._offsets[i] : self._offsets[i] + slice_len] = arrays[i * slice_len : (i + 1) * slice_len] else: if len(storage.shape) > 1 and storage.shape[1] < arrays.shape[1]: storage = expand_like(storage, arrays.shape[1], padding_index=self.padding_index) storage[self._offsets[i] : self._offsets[i] + slice_len, : arrays.shape[1]] = arrays[ i * slice_len : (i + 1) * slice_len ] return slice_len, storage def finalize(self): if self._storage is None: return if self._offsets[0] != self.process_length: logger.warning("Not all data has been set. 
Are you sure you passed all values?") return nested_truncate(self._storage, self.num_samples) @dataclass class LabelSmoother: epsilon: float = 0.1 ignore_index: int = -100 def __call__(self, model_output, labels, shift_labels=False): logits = model_output["logits"] if isinstance(model_output, dict) else model_output[0] if shift_labels: logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() log_probs = -nn.functional.log_softmax(logits, dim=-1) if labels.dim() == log_probs.dim() - 1: labels = labels.unsqueeze(-1) padding_mask = labels.eq(self.ignore_index) labels = torch.clamp(labels, min=0) nll_loss = log_probs.gather(dim=-1, index=labels) smoothed_loss = log_probs.sum(dim=-1, keepdim=True, dtype=torch.float32) nll_loss.masked_fill_(padding_mask, 0.0) smoothed_loss.masked_fill_(padding_mask, 0.0) num_active_elements = padding_mask.numel() - padding_mask.long().sum() nll_loss = nll_loss.sum() / num_active_elements smoothed_loss = smoothed_loss.sum() / (num_active_elements * log_probs.shape[-1]) return (1 - self.epsilon) * nll_loss + self.epsilon * smoothed_loss def get_length_grouped_indices(lengths, batch_size, mega_batch_mult=None, generator=None): if mega_batch_mult is None: mega_batch_mult = min(len(lengths) // (batch_size * 4), 50) if mega_batch_mult == 0: mega_batch_mult = 1 indices = torch.randperm(len(lengths), generator=generator) megabatch_size = mega_batch_mult * batch_size megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)] megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches] max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item() megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][0], megabatches[0][0] return [i for megabatch in megabatches for i in megabatch] class LengthGroupedSampler(Sampler): r def __init__( self, batch_size: int, dataset: Optional[Dataset] = None, lengths: Optional[List[int]] = None, model_input_name: Optional[str] = None, generator=None, ): if dataset is None and lengths is None: raise ValueError("One of dataset and lengths must be provided.") self.batch_size = batch_size if lengths is None: model_input_name = model_input_name if model_input_name is not None else "input_ids" if ( not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding)) or model_input_name not in dataset[0] ): raise ValueError( "Can only automatically infer lengths for datasets whose items are dictionaries with an " f"'{model_input_name}' key." ) lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info( "If lengths is a torch.Tensor, LengthGroupedSampler will be slow. Converting lengths to List[int]..." 
) lengths = lengths.tolist() self.lengths = lengths self.generator = generator def __len__(self): return len(self.lengths) def __iter__(self): indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=self.generator) return iter(indices) class DistributedLengthGroupedSampler(DistributedSampler): r def __init__( self, batch_size: int, dataset: Optional[Dataset] = None, num_replicas: Optional[int] = None, rank: Optional[int] = None, seed: int = 0, drop_last: bool = False, lengths: Optional[List[int]] = None, model_input_name: Optional[str] = None, ): if dataset is None and lengths is None: raise ValueError("One of dataset and lengths must be provided.") if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.batch_size = batch_size self.num_replicas = num_replicas self.rank = rank self.epoch = 0 self.drop_last = drop_last if lengths is None: model_input_name = model_input_name if model_input_name is not None else "input_ids" if ( not (isinstance(dataset[0], dict) or isinstance(dataset[0], BatchEncoding)) or model_input_name not in dataset[0] ): raise ValueError( "Can only automatically infer lengths for datasets whose items are dictionaries with an " f"'{model_input_name}' key." ) lengths = [len(feature[model_input_name]) for feature in dataset] elif isinstance(lengths, torch.Tensor): logger.info( "If lengths is a torch.Tensor, DistributedLengthGroupedSampler will be slow. Converting lengths to" " List[int]..." ) lengths = lengths.tolist() self.lengths = lengths if self.drop_last and len(self.lengths) % self.num_replicas != 0: self.num_samples = math.ceil((len(self.lengths) - self.num_replicas) / self.num_replicas) else: self.num_samples = math.ceil(len(self.lengths) / self.num_replicas) self.total_size = self.num_samples * self.num_replicas self.seed = seed def __iter__(self) -> Iterator: g = torch.Generator() g.manual_seed(self.seed + self.epoch) indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g) if not self.drop_last: indices += indices[: (self.total_size - len(indices))] else: indices = indices[: self.total_size] assert len(indices) == self.total_size indices = indices[self.rank : self.total_size : self.num_replicas] assert len(indices) == self.num_samples return iter(indices) class ShardSampler(Sampler): def __init__( self, dataset: Dataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, ): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.total_batch_size = total_batch_size = batch_size * num_processes num_batches = len(dataset) // total_batch_size if drop_last else math.ceil(len(dataset) / total_batch_size) self.total_num_samples = num_batches * total_batch_size def __iter__(self): indices = list(range(len(self.dataset))) while len(indices) < self.total_num_samples: indices += indices[: (self.total_num_samples - len(indices))] result = [] for batch_start in range(self.batch_size * self.process_index, self.total_num_samples, self.total_batch_size): result += indices[batch_start : batch_start + self.batch_size] return iter(result) def __len__(self): return self.total_num_samples // self.num_processes class IterableDatasetShard(IterableDataset): def 
__init__( self, dataset: IterableDataset, batch_size: int = 1, drop_last: bool = False, num_processes: int = 1, process_index: int = 0, seed: int = 0, ): self.dataset = dataset self.batch_size = batch_size self.drop_last = drop_last self.num_processes = num_processes self.process_index = process_index self.seed = seed self.epoch = 0 self.num_examples = 0 def set_epoch(self, epoch): self.epoch = epoch if hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(epoch) def __iter__(self): self.num_examples = 0 if ( not hasattr(self.dataset, "set_epoch") and hasattr(self.dataset, "generator") and isinstance(self.dataset.generator, torch.Generator) ): self.dataset.generator.manual_seed(self.seed + self.epoch) real_batch_size = self.batch_size * self.num_processes process_slice = range(self.process_index * self.batch_size, (self.process_index + 1) * self.batch_size) first_batch = None current_batch = [] for element in self.dataset: self.num_examples += 1 current_batch.append(element) if len(current_batch) == real_batch_size: for i in process_slice: yield current_batch[i] if first_batch is None: first_batch = current_batch.copy() current_batch = [] if not self.drop_last and len(current_batch) > 0: if first_batch is None: first_batch = current_batch.copy() while len(current_batch) < real_batch_size: current_batch += first_batch for i in process_slice: yield current_batch[i] def __len__(self): if self.drop_last: return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size else: return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size def _get_learning_rate(self): if self.is_deepspeed_enabled: try: last_lr = self.lr_scheduler.get_last_lr()[0] except AssertionError as e: if "need to call step" in str(e): logger.warning("tried to get lr value before scheduler/optimizer started stepping, returning lr=0") last_lr = 0 else: raise else: if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): last_lr = self.optimizer.param_groups[0]["lr"] else: last_lr = self.lr_scheduler.get_last_lr()[0] if torch.is_tensor(last_lr): last_lr = last_lr.item() return last_lr def _secs2timedelta(secs): msec = int(abs(secs - int(secs)) * 100) return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}" def metrics_format(self, metrics: Dict[str, float]) -> Dict[str, float]: metrics_copy = metrics.copy() for k, v in metrics_copy.items(): if "_mem_" in k: metrics_copy[k] = f"{ v >> 20 }MB" elif "_runtime" in k: metrics_copy[k] = _secs2timedelta(v) elif k == "total_flos": metrics_copy[k] = f"{ int(v) >> 30 }GF" elif isinstance(metrics_copy[k], float): metrics_copy[k] = round(v, 4) return metrics_copy def log_metrics(self, split, metrics): if not self.is_world_process_zero(): return print(f"***** {split} metrics *****") metrics_formatted = self.metrics_format(metrics) k_width = max(len(str(x)) for x in metrics_formatted.keys()) v_width = max(len(str(x)) for x in metrics_formatted.values()) for key in sorted(metrics_formatted.keys()): print(f" {key: <{k_width}} = {metrics_formatted[key]:>{v_width}}") def save_metrics(self, split, metrics, combined=True): if not self.is_world_process_zero(): return path = os.path.join(self.args.output_dir, f"{split}_results.json") with open(path, "w") as f: json.dump(metrics, f, indent=4, sort_keys=True) if combined: path = os.path.join(self.args.output_dir, "all_results.json") if os.path.exists(path): with open(path, "r") as f: all_metrics = json.load(f) else: all_metrics = {} all_metrics.update(metrics) with 
open(path, "w") as f: json.dump(all_metrics, f, indent=4, sort_keys=True) def save_state(self): if not self.is_world_process_zero(): return path = os.path.join(self.args.output_dir, "trainer_state.json") self.state.save_to_json(path) def get_model_param_count(model, trainable_only=False): if is_deepspeed_zero3_enabled(): def numel(p): return p.ds_numel if hasattr(p, "ds_numel") else p.numel() else: def numel(p): return p.numel() return sum(numel(p) for p in model.parameters() if not trainable_only or p.requires_grad) def get_parameter_names(model, forbidden_layer_types): result = [] for name, child in model.named_children(): result += [ f"{name}.{n}" for n in get_parameter_names(child, forbidden_layer_types) if not isinstance(child, tuple(forbidden_layer_types)) ] result += list(model._parameters.keys()) return result def get_module_class_from_name(module, name): modules_children = list(module.children()) if module.__class__.__name__ == name: return module.__class__ elif len(modules_children) == 0: return else: for child_module in modules_children: module_class = get_module_class_from_name(child_module, name) if module_class is not None: return module_class def remove_dummy_checkpoint(is_main_process, output_dir, filenames): if is_main_process: for filename in filenames: file = os.path.join(output_dir, filename) if os.path.isfile(file): os.remove(file) if is_sagemaker_mp_enabled(): import smdistributed.modelparallel.torch as smp @smp.step() def smp_forward_backward(model, inputs, gradient_accumulation_steps=1): outputs = model(**inputs) loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] loss /= gradient_accumulation_steps model.backward(loss) return loss @smp.step() def smp_forward_only(model, inputs): return model(**inputs) def smp_gather(tensor): if isinstance(tensor, (list, tuple)): return type(tensor)(smp_gather(t) for t in tensor) elif isinstance(tensor, dict): return type(tensor)({k: smp_gather(v) for k, v in tensor.items()}) elif not isinstance(tensor, torch.Tensor): raise TypeError( f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors." ) all_tensors = smp.allgather(tensor, smp.CommGroup.DP_GROUP) all_tensors = [atleast_1d(t) for t in all_tensors] return torch.cat([t.cpu() for t in all_tensors], dim=0) def smp_nested_concat(tensor): if isinstance(tensor, (list, tuple)): return type(tensor)(smp_nested_concat(t) for t in tensor) elif isinstance(tensor, dict): return type(tensor)({k: smp_nested_concat(v) for k, v in tensor.items()}) return tensor.concat().detach().cpu()
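A common consumer of get_parameter_names above is optimizer construction, where parameters living inside LayerNorm layers (and all biases) are excluded from weight decay. The following is a minimal sketch of that pattern on a toy model; it mirrors how Trainer builds its parameter groups, but the model and hyperparameters here are placeholders.

import torch
from torch import nn
from transformers.trainer_pt_utils import get_parameter_names

model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))

# Names of parameters that are NOT inside a LayerNorm; drop biases as well.
decay_parameters = get_parameter_names(model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]

optimizer_grouped_parameters = [
    {"params": [p for n, p in model.named_parameters() if n in decay_parameters], "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if n not in decay_parameters], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=5e-5)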
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license override self model generationconfig if a generationconfig is specified in args priority args generationconfig model generationconfig default generationconfig loads a generation generationconfig from the seq2seqtrainingarguments generationconfig arguments args genconfigarg str or generation generationconfig seq2seqtrainingarguments generationconfig argument returns a generation generationconfig generationconfig provided nothing to do str or path figuring if it is path pointing to a file pointing to a directory or else a model id or url this step is required in order to determine configfilename dir path model id or url run evaluation and returns metrics the calling script will be responsible for providing a method to compute metrics as they are taskdependent pass it to the init computemetrics argument you can also subclass and override this method to inject custom behavior args evaldataset dataset optional pass a dataset if you wish to override self evaldataset if it is an datasets dataset columns not accepted by the model forward method are automatically removed it must implement the len method ignorekeys liststr optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions metrickeyprefix str optional defaults to eval an optional prefix to be used as the metrics key prefix for example the metrics bleu will be named evalbleu if the prefix is eval default maxlength int optional the maximum target length to use when predicting with the generate method numbeams int optional number of beams for beam search that will be used when predicting with the generate method 1 means no beam search genkwargs additional generate specific kwargs returns a dictionary containing the evaluation loss and the potential metrics computed from the predictions the dictionary also contains the epoch number which comes from the training state use legacy argument setting if a the option is not explicitly passed and b the argument is set in the training args we don t want to drop samples in general run prediction and returns predictions and potential metrics depending on the dataset and your use case your test dataset may contain labels in that case this method will also return metrics like in evaluate args testdataset dataset dataset to run the predictions on if it is a datasets dataset columns not accepted by the model forward method are automatically removed has to implement the method len ignorekeys liststr optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions metrickeyprefix str optional defaults to eval an optional prefix to be used as the metrics key prefix for example the metrics bleu will be named evalbleu if the prefix is eval default maxlength int optional the maximum target length to use when predicting with the generate method numbeams int optional number of beams for beam search that will be used when predicting with the generate method 1 means no beam search 
genkwargs additional generate specific kwargs tip if your predictions or labels have different sequence lengths for instance because you re doing dynamic padding in a token classification task the predictions will be padded on the right to allow for concatenation into one array the padding index is 100 tip returns namedtuple a namedtuple with the following keys predictions np ndarray the predictions on testdataset labelids np ndarray optional the labels if the dataset contained some metrics dictstr float optional the potential dictionary of metrics if the dataset contained labels use legacy argument setting if a the option is not explicitly passed and b the argument is set in the training args perform an evaluation step on model using inputs subclass and override to inject custom behavior args model nn module the model to evaluate inputs dictstr uniontorch tensor any the inputs and targets of the model the dictionary will be unpacked before being fed to the model most models expect the targets under the argument labels check your model s documentation for all accepted arguments predictionlossonly bool whether or not to return the loss only genkwargs additional generate specific kwargs return tupleoptionalfloat optionaltorch tensor optionaltorch tensor a tuple with the loss logits and labels each being optional priority handled in generate nonnone genkwargs model generationconfig default generationconfig if the decoderinputids was created from labels evict the former so that the model can freely generate otherwise it would continue generating from the padded decoderinputids temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop todo remove this hack when the legacy code that initializes generationconfig from a model config is removed in https github comhuggingfacetransformersblob98d88b23f54e5a23e741833f1e973fdf600cc2c5srctransformersgenerationutils pyl1183 retrieves generationconfig from model generationconfig in case the batch is shorter than max length the output should be padded if pad token is not defined at least eos token has to be defined 2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license override self model generation_config if a generationconfig is specified in args priority args generation_config model generation_config default generationconfig loads a generation generationconfig from the seq2seqtrainingarguments generation_config arguments args gen_config_arg str or generation generationconfig seq2seqtrainingarguments generation_config argument returns a generation generationconfig generationconfig provided nothing to do str or path figuring if it is path pointing to a file pointing to a directory or else a model id or url this step is required in order to determine config_file_name dir path model id or url run evaluation and returns metrics the calling script will be responsible for providing a method to compute metrics as they are task dependent pass it to the init compute_metrics argument you can also subclass and override this method to inject custom 
behavior args eval_dataset dataset optional pass a dataset if you wish to override self eval_dataset if it is an datasets dataset columns not accepted by the model forward method are automatically removed it must implement the __len__ method ignore_keys list str optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions metric_key_prefix str optional defaults to eval an optional prefix to be used as the metrics key prefix for example the metrics bleu will be named eval_bleu if the prefix is eval default max_length int optional the maximum target length to use when predicting with the generate method num_beams int optional number of beams for beam search that will be used when predicting with the generate method 1 means no beam search gen_kwargs additional generate specific kwargs returns a dictionary containing the evaluation loss and the potential metrics computed from the predictions the dictionary also contains the epoch number which comes from the training state use legacy argument setting if a the option is not explicitly passed and b the argument is set in the training args we don t want to drop samples in general run prediction and returns predictions and potential metrics depending on the dataset and your use case your test dataset may contain labels in that case this method will also return metrics like in evaluate args test_dataset dataset dataset to run the predictions on if it is a datasets dataset columns not accepted by the model forward method are automatically removed has to implement the method __len__ ignore_keys list str optional a list of keys in the output of your model if it is a dictionary that should be ignored when gathering predictions metric_key_prefix str optional defaults to eval an optional prefix to be used as the metrics key prefix for example the metrics bleu will be named eval_bleu if the prefix is eval default max_length int optional the maximum target length to use when predicting with the generate method num_beams int optional number of beams for beam search that will be used when predicting with the generate method 1 means no beam search gen_kwargs additional generate specific kwargs tip if your predictions or labels have different sequence lengths for instance because you re doing dynamic padding in a token classification task the predictions will be padded on the right to allow for concatenation into one array the padding index is 100 tip returns namedtuple a namedtuple with the following keys predictions np ndarray the predictions on test_dataset label_ids np ndarray optional the labels if the dataset contained some metrics dict str float optional the potential dictionary of metrics if the dataset contained labels use legacy argument setting if a the option is not explicitly passed and b the argument is set in the training args perform an evaluation step on model using inputs subclass and override to inject custom behavior args model nn module the model to evaluate inputs dict str union torch tensor any the inputs and targets of the model the dictionary will be unpacked before being fed to the model most models expect the targets under the argument labels check your model s documentation for all accepted arguments prediction_loss_only bool whether or not to return the loss only gen_kwargs additional generate specific kwargs return tuple optional float optional torch tensor optional torch tensor a tuple with the loss logits and labels each being optional priority handled in generate non 
none gen_kwargs model generation_config default generationconfig if the decoder_input_ids was created from labels evict the former so that the model can freely generate otherwise it would continue generating from the padded decoder_input_ids temporary hack to ensure the generation config is not initialized for each iteration of the evaluation loop todo remove this hack when the legacy code that initializes generation_config from a model config is removed in https://github.com/huggingface/transformers/blob/98d88b23f54e5a23e741833f1e973fdf600cc2c5/src/transformers/generation/utils.py#L1183 retrieves generationconfig from model generation_config in case the batch is shorter than max length the output should be padded if pad token is not defined at least eos token has to be defined
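To show how the generation arguments described above reach evaluate and predict, here is a hedged usage sketch. The model, tokenizer and datasets are placeholders to be provided elsewhere; only the argument names (predict_with_generate, generation_max_length, generation_num_beams, and the per-call max_length / num_beams overrides) reflect the documented behaviour.

from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments

# Hypothetical setup; `model`, `tokenizer`, `eval_dataset` and `test_dataset` come from elsewhere.
args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,   # run model.generate() during evaluation/prediction
    generation_max_length=64,     # default max_length for generate
    generation_num_beams=4,       # default num_beams for generate
)
trainer = Seq2SeqTrainer(model=model, args=args, eval_dataset=eval_dataset, tokenizer=tokenizer)

# Per-call gen_kwargs take priority over the defaults set in `args`:
metrics = trainer.evaluate(max_length=32, num_beams=1)
predictions = trainer.predict(test_dataset, max_length=32, num_beams=1)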
from copy import deepcopy from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import Dataset from .generation.configuration_utils import GenerationConfig from .integrations.deepspeed import is_deepspeed_zero3_enabled from .trainer import Trainer from .utils import logging if TYPE_CHECKING: from .data.data_collator import DataCollator from .modeling_utils import PreTrainedModel from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import TrainerCallback from .trainer_utils import EvalPrediction, PredictionOutput from .training_args import TrainingArguments logger = logging.get_logger(__name__) class Seq2SeqTrainer(Trainer): def __init__( self, model: Union["PreTrainedModel", nn.Module] = None, args: "TrainingArguments" = None, data_collator: Optional["DataCollator"] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, tokenizer: Optional["PreTrainedTokenizerBase"] = None, model_init: Optional[Callable[[], "PreTrainedModel"]] = None, compute_metrics: Optional[Callable[["EvalPrediction"], Dict]] = None, callbacks: Optional[List["TrainerCallback"]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, ): super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) if self.args.generation_config is not None: gen_config = self.load_generation_config(self.args.generation_config) self.model.generation_config = gen_config @staticmethod def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> GenerationConfig: if isinstance(gen_config_arg, GenerationConfig): return deepcopy(gen_config_arg) pretrained_model_name = Path(gen_config_arg) if isinstance(gen_config_arg, str) else gen_config_arg config_file_name = None if pretrained_model_name.is_file(): config_file_name = pretrained_model_name.name pretrained_model_name = pretrained_model_name.parent elif pretrained_model_name.is_dir(): pass else: pretrained_model_name = gen_config_arg gen_config = GenerationConfig.from_pretrained(pretrained_model_name, config_file_name) return gen_config def evaluate( self, eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs, ) -> Dict[str, float]: gen_kwargs = gen_kwargs.copy() if ( gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None and self.args.generation_max_length is not None ): gen_kwargs["max_length"] = self.args.generation_max_length if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: gen_kwargs["num_beams"] = self.args.generation_num_beams self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def predict( self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test", **gen_kwargs, ) -> "PredictionOutput": gen_kwargs = gen_kwargs.copy() if ( gen_kwargs.get("max_length") is None and 
gen_kwargs.get("max_new_tokens") is None and self.args.generation_max_length is not None ): gen_kwargs["max_length"] = self.args.generation_max_length if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None: gen_kwargs["num_beams"] = self.args.generation_num_beams self.gather_function = self.accelerator.gather self._gen_kwargs = gen_kwargs return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, **gen_kwargs, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: if not self.args.predict_with_generate or prediction_loss_only: return super().prediction_step( model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys ) has_labels = "labels" in inputs inputs = self._prepare_inputs(inputs) if len(gen_kwargs) == 0 and hasattr(self, "_gen_kwargs"): gen_kwargs = self._gen_kwargs.copy() if "num_beams" in gen_kwargs and gen_kwargs["num_beams"] is None: gen_kwargs.pop("num_beams") if "max_length" in gen_kwargs and gen_kwargs["max_length"] is None: gen_kwargs.pop("max_length") default_synced_gpus = True if is_deepspeed_zero3_enabled() else False gen_kwargs["synced_gpus"] = ( gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus ) generation_inputs = inputs.copy() if ( "labels" in generation_inputs and "decoder_input_ids" in generation_inputs and generation_inputs["labels"].shape == generation_inputs["decoder_input_ids"].shape ): generation_inputs = { k: v for k, v in inputs.items() if k not in ("decoder_input_ids", "decoder_attention_mask") } generated_tokens = self.model.generate(**generation_inputs, **gen_kwargs) if self.model.generation_config._from_model_config: self.model.generation_config._from_model_config = False gen_config = self.model.generation_config if generated_tokens.shape[-1] < gen_config.max_length: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_length) elif gen_config.max_new_tokens is not None and generated_tokens.shape[-1] < gen_config.max_new_tokens + 1: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_config.max_new_tokens + 1) with torch.no_grad(): if has_labels: with self.compute_loss_context_manager(): outputs = model(**inputs) if self.label_smoother is not None: loss = self.label_smoother(outputs, inputs["labels"]).mean().detach() else: loss = (outputs["loss"] if isinstance(outputs, dict) else outputs[0]).mean().detach() else: loss = None if self.args.prediction_loss_only: return loss, None, None if has_labels: labels = inputs["labels"] if labels.shape[-1] < gen_config.max_length: labels = self._pad_tensors_to_max_len(labels, gen_config.max_length) elif gen_config.max_new_tokens is not None and labels.shape[-1] < gen_config.max_new_tokens + 1: labels = self._pad_tensors_to_max_len(labels, gen_config.max_new_tokens + 1) else: labels = None return loss, generated_tokens, labels def _pad_tensors_to_max_len(self, tensor, max_length): if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"): pad_token_id = ( self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id ) else: if self.model.config.pad_token_id is not None: pad_token_id = self.model.config.pad_token_id else: raise ValueError("Pad_token_id must be set in the configuration of the model, in 
order to pad tensors") padded_tensor = pad_token_id * torch.ones( (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device ) padded_tensor[:, : tensor.shape[-1]] = tensor return padded_tensor
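The final padding step in prediction_step can be reproduced in isolation. The snippet below is a self-contained sketch of the same idea as _pad_tensors_to_max_len, right-padding a batch of generated ids to a fixed max_length with the pad token; the token ids are made up for the example.

import torch

def pad_to_max_len(tensor, max_length, pad_token_id):
    # Same idea as Seq2SeqTrainer._pad_tensors_to_max_len: keep existing ids, pad on the right.
    padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
    padded[:, : tensor.shape[-1]] = tensor
    return padded

generated = torch.tensor([[5, 8, 2], [7, 2, 0]])  # made-up generated ids of length 3
print(pad_to_max_len(generated, max_length=6, pad_token_id=0))
# tensor([[5, 8, 2, 0, 0, 0],
#         [7, 2, 0, 0, 0, 0]])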
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license tensorflow trainer class import datetime import math import os import warnings from typing import callable dict optional tuple from utils import envvarstruevalues integrations must be imported before ml frameworks isort off from integrations import iscometavailable iswandbavailable isort on import numpy as np import tensorflow as tf from tensorflow python distribute values import perreplica from modelingtfutils import tfpretrainedmodel from optimizationtf import gradientaccumulator createoptimizer from trainerutils import prefixcheckpointdir evalprediction intervalstrategy predictionoutput enablefulldeterminism setseed from trainingargstf import tftrainingarguments from utils import logging if iswandbavailable import wandb if iscometavailable import cometml logger logging getloggername class tftrainer def init self model tfpretrainedmodel args tftrainingarguments traindataset optionaltf data dataset none evaldataset optionaltf data dataset none computemetrics optionalcallableevalprediction dict none tbwriter optionaltf summary summarywriter none optimizers tupletf keras optimizers optimizer tf keras optimizers schedules learningrateschedule none none self model model self args args self traindataset traindataset self evaldataset evaldataset self computemetrics computemetrics self optimizer self lrscheduler optimizers self gradientaccumulator gradientaccumulator self globalstep 0 self epochlogging 0 self evalloss tf keras metrics sum warnings warn the class tftrainer is deprecated and will be removed in version 5 of transformers we recommend using native keras instead by calling methods like fit and predict directly on the model object detailed examples of the keras style can be found in our examples at https github comhuggingfacetransformerstreemainexamplestensorflow futurewarning if tbwriter is not none self tbwriter tbwriter else self tbwriter tf summary createfilewriterself args loggingdir if iswandbavailable self setupwandb elif os getenvwandbdisabled upper not in envvarstruevalues logger info you are instantiating a trainer but wb is not installed to use wandb logging run pip install wandb wandb login see https docs wandb comhuggingface if iscometavailable self setupcomet elif os environ getcometmode disabled logger info to use cometml logging run pipconda install cometml see https www comet mldocspythonsdkhuggingface enablefulldeterminismself args seed if self args fulldeterminism else setseedself args seed def gettraintfdatasetself tf data dataset if self traindataset is none raise valueerrortrainer training requires a traindataset self totaltrainbatchsize self args trainbatchsize self args gradientaccumulationsteps self numtrainexamples self traindataset cardinality numpy if self numtrainexamples 0 raise valueerrorthe training dataset must have an asserted cardinality ds self traindataset repeat shuffleself numtrainexamples seedself args seed batchself totaltrainbatchsize dropremainderself args dataloaderdroplast prefetchtf data experimental autotune return self args 
TensorFlow trainer class. Integrations must be imported before ML frameworks (isort: off / isort: on).

TFTrainer is a simple but feature-complete training and eval loop for TensorFlow, optimized for Transformers.

Args:
    model (TFPreTrainedModel):
        The model to train, evaluate or use for predictions.
    args (TFTrainingArguments):
        The arguments to tweak training.
    train_dataset (tf.data.Dataset, optional):
        The dataset to use for training. The dataset should yield tuples of (features, labels) where features is a dict of input features and labels is the labels. If labels is a tensor, the loss is calculated by the model by calling model(features, labels=labels). If labels is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling model(features, **labels).
    eval_dataset (tf.data.Dataset, optional):
        The dataset to use for evaluation, with the same (features, labels) contract as train_dataset.
    compute_metrics (Callable[[EvalPrediction], Dict], optional):
        The function that will be used to compute metrics at evaluation. Must take an EvalPrediction and return a dictionary string to metric values.
    tb_writer (tf.summary.SummaryWriter, optional):
        Object to write to TensorBoard.
    optimizers (Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule], optional):
        A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of tf.keras.optimizers.Adam if args.weight_decay_rate is 0, else an instance of AdamWeightDecay. The scheduler defaults to an instance of tf.keras.optimizers.schedules.PolynomialDecay if args.num_warmup_steps is 0, else an instance of WarmUp.

get_train_tfdataset: Returns the training tf.data.Dataset. Subclass and override this method if you want to inject some custom behavior.

get_eval_tfdataset: Returns the evaluation tf.data.Dataset.
    Args:
        eval_dataset (tf.data.Dataset, optional): If provided, will override self.eval_dataset. Same (features, labels) contract as above.
    Subclass and override this method if you want to inject some custom behavior.

get_test_tfdataset: Returns a test tf.data.Dataset.
    Args:
        test_dataset (tf.data.Dataset): The dataset to use, with the same (features, labels) contract as above.
    Subclass and override this method if you want to inject some custom behavior.

create_optimizer_and_scheduler: Sets up the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the TFTrainer's init through optimizers, or subclass and override this method.

setup_wandb: Sets up the optional Weights & Biases (wandb) integration. One can subclass and override this method to customize the setup if needed. Find more information here: https://docs.wandb.com/huggingface. You can also override the following environment variables:
    WANDB_PROJECT (optional, str): "huggingface" by default, set this to a custom string to store results in a different project.
    WANDB_DISABLED (optional, boolean): defaults to false, set to "true" to disable wandb entirely.

setup_comet: Sets up the optional Comet.ml integration.
    Environment:
        COMET_MODE (optional, str): "OFFLINE", "ONLINE" or "DISABLED".
        COMET_PROJECT_NAME (optional, str): Comet.ml project name for experiments.
        COMET_OFFLINE_DIRECTORY (optional, str): folder to use for saving offline experiments when COMET_MODE is "OFFLINE".
    For a number of configurable items in the environment, see here: https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables

prediction_loop: Prediction/evaluation loop, shared by TFTrainer.evaluate() and TFTrainer.predict(). Works both with or without labels. Resets the past mems state at the beginning of the evaluation if necessary and cleans that state at the end.

log: Logs values on the various objects watching training. Subclass and override this method to inject custom behavior.
    Args:
        logs (Dict[str, float]): The values to log.

evaluate: Runs evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init compute_metrics argument).
    Args:
        eval_dataset (tf.data.Dataset, optional): Pass a dataset if you wish to override self.eval_dataset. Same (features, labels) contract as above.
    Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions.

prediction_step: Computes the prediction on features and updates the loss with labels. Subclass and override to inject some custom behavior.

train: Train method to train the model. Implementation notes: self.args.dataloader_drop_last has in fact no effect in trainer_tf.py because the dataset is repeated before being batched; it only has an effect when a TPU is used, which requires an explicit tensor shape in order to make the gradient accumulation implementation work. There is at least one update per epoch. Since self.args.num_train_epochs can be a float, epochs is always kept as a float. TODO: we might want to print a more precise number of epochs if self.args.max_steps > 0. The past mems state is reset at the beginning of each epoch if necessary, already-trained steps are skipped when resuming training, and the state is cleaned at the end of training.

training_step: Performs a training step on features and labels. Subclass and override to inject some custom behavior. (_get_step_inputs needs to build a PerReplica object for nb_instances when labels are PerReplica.)

run_model: Computes the loss of the given features and labels pair. Subclass and override this method if you want to inject some custom behavior.
    Args:
        features (tf.Tensor): A batch of input features.
        labels (tf.Tensor): A batch of labels.
        training (bool): Whether or not to run the model in training mode.
    Returns: A tuple of two tf.Tensor: the loss and the logits.

predict: Runs prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels; in that case, this method will also return metrics, like in evaluate().
    Args:
        test_dataset (tf.data.Dataset): Dataset to run the predictions on. Same (features, labels) contract as above.
    Returns: A NamedTuple with the following keys:
        predictions (np.ndarray): The predictions on test_dataset.
        label_ids (np.ndarray, optional): The labels (if the dataset contained some).
        metrics (Dict[str, float], optional): The potential dictionary of metrics (if the dataset contained labels).

save_model: Will save the model, so you can reload it using from_pretrained().
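To make the class contract above concrete, here is a minimal, hypothetical usage sketch (not part of the original file): it assumes a toy in-memory tf.data.Dataset yielding (features_dict, labels) pairs, an arbitrary pretrained checkpoint, and a made-up output_dir path.

import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments

# Toy dataset: features must be a dict of tensors, labels a tensor; from_tensor_slices
# gives an asserted cardinality, which TFTrainer requires.
features = {
    "input_ids": tf.ones((8, 16), dtype=tf.int32),
    "attention_mask": tf.ones((8, 16), dtype=tf.int32),
}
labels = tf.zeros((8,), dtype=tf.int32)
train_ds = tf.data.Dataset.from_tensor_slices((features, labels))

training_args = TFTrainingArguments(
    output_dir="./tf_trainer_out",  # hypothetical path
    num_train_epochs=1,
    per_device_train_batch_size=4,
    logging_steps=1,
)

# Create the model under the distribution strategy held by the arguments.
with training_args.strategy.scope():
    model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

trainer = TFTrainer(model=model, args=training_args, train_dataset=train_ds, eval_dataset=train_ds)
trainer.train()
print(trainer.evaluate())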
import datetime import math import os import warnings from typing import Callable, Dict, Optional, Tuple from .utils import ENV_VARS_TRUE_VALUES from .integrations import ( is_comet_available, is_wandb_available, ) import numpy as np import tensorflow as tf from tensorflow.python.distribute.values import PerReplica from .modeling_tf_utils import TFPreTrainedModel from .optimization_tf import GradientAccumulator, create_optimizer from .trainer_utils import ( PREFIX_CHECKPOINT_DIR, EvalPrediction, IntervalStrategy, PredictionOutput, enable_full_determinism, set_seed, ) from .training_args_tf import TFTrainingArguments from .utils import logging if is_wandb_available(): import wandb if is_comet_available(): import comet_ml logger = logging.get_logger(__name__) class TFTrainer: def __init__( self, model: TFPreTrainedModel, args: TFTrainingArguments, train_dataset: Optional[tf.data.Dataset] = None, eval_dataset: Optional[tf.data.Dataset] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, tb_writer: Optional[tf.summary.SummaryWriter] = None, optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = ( None, None, ), ): self.model = model self.args = args self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.compute_metrics = compute_metrics self.optimizer, self.lr_scheduler = optimizers self.gradient_accumulator = GradientAccumulator() self.global_step = 0 self.epoch_logging = 0 self.eval_loss = tf.keras.metrics.Sum() warnings.warn( "The class `TFTrainer` is deprecated and will be removed in version 5 of Transformers. " "We recommend using native Keras instead, by calling methods like `fit()` and `predict()` " "directly on the model object. Detailed examples of the Keras style can be found in our " "examples at https://github.com/huggingface/transformers/tree/main/examples/tensorflow", FutureWarning, ) if tb_writer is not None: self.tb_writer = tb_writer else: self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir) if is_wandb_available(): self.setup_wandb() elif os.getenv("WANDB_DISABLED", "").upper() not in ENV_VARS_TRUE_VALUES: logger.info( "You are instantiating a Trainer but W&B is not installed. To use wandb logging, " "run `pip install wandb && wandb login` see https://docs.wandb.com/huggingface." 
) if is_comet_available(): self.setup_comet() elif os.environ.get("COMET_MODE") != "DISABLED": logger.info( "To use comet_ml logging, run `pip/conda install comet_ml` " "see https://www.comet.ml/docs/python-sdk/huggingface/" ) enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) def get_train_tfdataset(self) -> tf.data.Dataset: if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps self.num_train_examples = self.train_dataset.cardinality().numpy() if self.num_train_examples < 0: raise ValueError("The training dataset must have an asserted cardinality") ds = ( self.train_dataset.repeat() .shuffle(self.num_train_examples, seed=self.args.seed) .batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last) .prefetch(tf.data.experimental.AUTOTUNE) ) return self.args.strategy.experimental_distribute_dataset(ds) def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset: if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset num_examples = eval_dataset.cardinality().numpy() if num_examples < 0: raise ValueError("The training dataset must have an asserted cardinality") approx = math.floor if self.args.dataloader_drop_last else math.ceil steps = approx(num_examples / self.args.eval_batch_size) ds = ( eval_dataset.repeat() .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last) .prefetch(tf.data.experimental.AUTOTUNE) ) return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset: num_examples = test_dataset.cardinality().numpy() if num_examples < 0: raise ValueError("The training dataset must have an asserted cardinality") steps = math.ceil(num_examples / self.args.eval_batch_size) ds = test_dataset.batch(self.args.eval_batch_size).prefetch(tf.data.experimental.AUTOTUNE) return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples def create_optimizer_and_scheduler(self, num_training_steps: int): if not self.optimizer and not self.lr_scheduler: warmup_steps = ( self.args.warmup_steps if self.args.warmup_steps > 0 else math.ceil(num_training_steps * self.args.warmup_ratio) ) self.optimizer, self.lr_scheduler = create_optimizer( self.args.learning_rate, num_training_steps, warmup_steps, adam_beta1=self.args.adam_beta1, adam_beta2=self.args.adam_beta2, adam_epsilon=self.args.adam_epsilon, weight_decay_rate=self.args.weight_decay, power=self.args.poly_power, ) def setup_wandb(self): logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"') combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()} wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name) def setup_comet(self): comet_mode = os.getenv("COMET_MODE", "ONLINE").upper() args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")} experiment = None if comet_mode == "ONLINE": experiment = comet_ml.Experiment(**args) logger.info("Automatic Comet.ml online logging enabled") elif comet_mode == "OFFLINE": args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./") experiment = 
comet_ml.OfflineExperiment(**args) logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished") if experiment is not None: experiment._set_model_graph(self.model, framework="transformers") experiment._log_parameters(self.args, prefix="args/", framework="transformers") experiment._log_parameters(self.model.config, prefix="config/", framework="transformers") def prediction_loop( self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool] = None, ) -> PredictionOutput: prediction_loss_only = ( prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only ) logger.info(f"***** Running {description} *****") logger.info(f" Num examples in dataset = {num_examples}") if description == "Evaluation": logger.info(f" Num examples in used in evaluation = {self.args.eval_batch_size * steps}") logger.info(f" Batch size = {self.args.eval_batch_size}") label_ids: np.ndarray = None preds: np.ndarray = None self.eval_loss.reset_states() if self.args.past_index >= 0: self._past = None for step, batch in enumerate(dataset): logits = self.distributed_prediction_steps(batch) _, labels = batch if not prediction_loss_only: if isinstance(logits, tuple): logits = logits[0] if isinstance(labels, tuple): labels = labels[0] if self.args.n_replicas > 1: for val in logits.values: if preds is None: preds = val.numpy() else: preds = np.append(preds, val.numpy(), axis=0) for val in labels.values: if label_ids is None: label_ids = val.numpy() else: label_ids = np.append(label_ids, val.numpy(), axis=0) else: if preds is None: preds = logits.numpy() else: preds = np.append(preds, logits.numpy(), axis=0) if label_ids is None: label_ids = labels.numpy() else: label_ids = np.append(label_ids, labels.numpy(), axis=0) if step == steps - 1: break if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} metrics["eval_loss"] = self.eval_loss.result().numpy() / steps for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) if self.args.past_index and hasattr(self, "_past"): delattr(self, "_past") return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def log(self, logs: Dict[str, float]) -> None: logs["epoch"] = self.epoch_logging if self.tb_writer: with self.tb_writer.as_default(): for k, v in logs.items(): tf.summary.scalar(k, v, step=self.global_step) self.tb_writer.flush() if is_wandb_available(): wandb.log(logs, step=self.global_step) if is_comet_available(): experiment = comet_ml.config.get_global_experiment() if experiment is not None: experiment._log_metrics( logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers" ) output = {**logs, **{"step": self.global_step}} logger.info(output) def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]: eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset) output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation") logs = {**output.metrics} logs["epoch"] = self.epoch_logging self.log(logs) return output.metrics def prediction_step( self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor ) -> tf.Tensor: per_example_loss, logits = self.run_model(features, labels, False) scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, 
dtype=per_example_loss.dtype) self.eval_loss.update_state(scaled_loss) return logits @tf.function def distributed_prediction_steps(self, batch): nb_instances_in_batch = self._compute_nb_instances(batch) inputs = self._get_step_inputs(batch, nb_instances_in_batch) logits = self.args.strategy.run(self.prediction_step, inputs) return logits def train(self) -> None: train_ds = self.get_train_tfdataset() if self.args.debug: tf.summary.trace_on(graph=True, profiler=True) self.gradient_accumulator.reset() num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size approx = math.floor if self.args.dataloader_drop_last else math.ceil num_update_steps_per_epoch = approx(num_update_steps_per_epoch) num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) self.steps_per_epoch = num_update_steps_per_epoch if self.args.max_steps > 0: t_total = self.args.max_steps epochs = (self.args.max_steps // self.steps_per_epoch) + int( self.args.max_steps % self.steps_per_epoch > 0 ) else: t_total = self.steps_per_epoch * self.args.num_train_epochs epochs = self.args.num_train_epochs epochs = float(epochs) with self.args.strategy.scope(): self.create_optimizer_and_scheduler(num_training_steps=t_total) folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR) ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit) iterations = self.optimizer.iterations epochs_trained = 0 steps_trained_in_current_epoch = 0 if self.model.ckpt_manager.latest_checkpoint: logger.info( f"Checkpoint file {self.model.ckpt_manager.latest_checkpoint} found and restoring from checkpoint" ) ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial() self.global_step = iterations.numpy() epochs_trained = self.global_step // self.steps_per_epoch steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info(f" Continuing training from global step {self.global_step}") logger.info(f" Will skip the first {steps_trained_in_current_epoch} steps in the first epoch") tf.summary.experimental.set_step(self.global_step) with self.tb_writer.as_default(): tf.summary.text("args", self.args.to_json_string()) self.tb_writer.flush() logger.info("***** Running training *****") logger.info(f" Num examples = {self.num_train_examples}") logger.info(f" Num Epochs = {epochs}") logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. 
parallel, distributed & accumulation) = {self.total_train_batch_size}" ) logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}") logger.info(f" Steps per epoch = {self.steps_per_epoch}") logger.info(f" Total optimization steps = {t_total}") self.train_loss = tf.keras.metrics.Sum() start_time = datetime.datetime.now() for epoch_iter in range(epochs_trained, int(epochs)): if self.args.past_index >= 0: self._past = None for step, batch in enumerate(train_ds): if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue self.distributed_training_steps(batch) self.global_step = iterations.numpy() self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch training_loss = self.train_loss.result() / (step + 1) if self.args.debug: logs = {} logs["loss"] = training_loss.numpy() logs["epoch"] = self.epoch_logging self.log(logs) if self.global_step == 1 and self.args.debug: with self.tb_writer.as_default(): tf.summary.trace_export( name="training", step=self.global_step, profiler_outdir=self.args.logging_dir ) if ( self.args.eval_steps > 0 and self.args.evaluation_strategy == IntervalStrategy.STEPS and self.global_step % self.args.eval_steps == 0 ): self.evaluate() if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or ( self.global_step == 1 and self.args.logging_first_step ): logs = {} logs["loss"] = training_loss.numpy() logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy() logs["epoch"] = self.epoch_logging self.log(logs) if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0: ckpt_save_path = self.model.ckpt_manager.save() logger.info(f"Saving checkpoint for step {self.global_step} at {ckpt_save_path}") if self.args.max_steps > 0 and self.global_step >= t_total: break if self.global_step % self.steps_per_epoch == 0: break self.train_loss.reset_states() if self.args.max_steps > 0 and self.global_step >= self.args.max_steps: break end_time = datetime.datetime.now() logger.info(f"Training took: {str(end_time - start_time)}") if self.args.past_index and hasattr(self, "_past"): delattr(self, "_past") def training_step(self, features, labels, nb_instances_in_global_batch): per_example_loss, _ = self.run_model(features, labels, True) scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype) gradients = tf.gradients(scaled_loss, self.model.trainable_variables) gradients = [ g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables) ] if self.args.gradient_accumulation_steps > 1: self.gradient_accumulator(gradients) self.train_loss.update_state(scaled_loss) if self.args.gradient_accumulation_steps == 1: return gradients def apply_gradients(self, features, labels, nb_instances_in_global_batch): if self.args.gradient_accumulation_steps == 1: gradients = self.training_step(features, labels, nb_instances_in_global_batch) self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) else: for _ in tf.range(self.args.gradient_accumulation_steps): reduced_features = { k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items() } if tf.is_tensor(labels): reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas] elif isinstance(labels, dict): reduced_labels = { k: lbl[: self.args.train_batch_size // self.args.n_replicas] for k, lbl in labels.items() } else: raise ValueError("The labels must be either a tf.Tensor or a dict.") 
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch) features = { k: tf.concat( [ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]], axis=0, ) for k, ft in features.items() } if tf.is_tensor(labels): labels = tf.concat( [labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0 ) elif isinstance(labels, dict): labels = { k: tf.concat( [lbl[self.args.train_batch_size // self.args.n_replicas :], reduced_labels[k]], axis=0, ) for k, lbl in labels.items() } else: raise ValueError("The labels must be either a tf.Tensor or a dict.") gradients = self.gradient_accumulator.gradients gradients = [ (tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients ] self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables))) self.gradient_accumulator.reset() @tf.function def distributed_training_steps(self, batch): with self.args.strategy.scope(): nb_instances_in_batch = self._compute_nb_instances(batch) inputs = self._get_step_inputs(batch, nb_instances_in_batch) self.args.strategy.run(self.apply_gradients, inputs) @staticmethod def _compute_nb_instances(batch): labels = batch[-1] if isinstance(labels, PerReplica): labels = tf.concat(labels.values, axis=0) nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32)) return nb_instances @staticmethod def _get_step_inputs(batch, nb_instances): features, labels = batch if isinstance(labels, PerReplica): nb_instances = PerReplica([nb_instances] * len(labels.values)) step_inputs = (features, labels, nb_instances) return step_inputs def run_model(self, features, labels, training): if self.args.past_index >= 0 and getattr(self, "_past", None) is not None: features["mems"] = self._past if isinstance(labels, (dict)): outputs = self.model(features, training=training, **labels)[:2] else: outputs = self.model(features, labels=labels, training=training)[:2] loss, logits = outputs[:2] if self.args.past_index >= 0: self._past = outputs[self.args.past_index] return loss, logits def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset) return self.prediction_loop(test_ds, steps, num_examples, description="Prediction") def save_model(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info(f"Saving model in {output_dir}") if not isinstance(self.model, TFPreTrainedModel): raise ValueError("Trainer.model appears to not be a PreTrainedModel") self.model.save_pretrained(output_dir)
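Since most of the hooks above are meant to be overridden, a small hypothetical subclass can illustrate the intended extension point; the extra log file name below is made up for illustration.

from typing import Dict
from transformers import TFTrainer

class FileLoggingTFTrainer(TFTrainer):
    # Hypothetical subclass: keeps the default TensorBoard/W&B/Comet behaviour of
    # TFTrainer.log() and additionally appends each log dict to a local text file.
    def log(self, logs: Dict[str, float]) -> None:
        super().log(logs)
        with open("training_logs.txt", "a") as f:  # made-up file name
            f.write(f"step={self.global_step} {logs}\n")

The same pattern applies to run_model(), training_step() or the dataset builders when custom behavior is needed.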
Utilities for the Trainer and TFTrainer classes. Should be independent from PyTorch and TensorFlow. (Copyright 2020-present the HuggingFace Inc. team; licensed under the Apache License, Version 2.0.)

seed_worker: Helper function to set worker seed during Dataloader initialization.

enable_full_determinism: Helper function for reproducible behavior during distributed training. See https://pytorch.org/docs/stable/notes/randomness.html for PyTorch and https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for TensorFlow. It sets the seed first, then enables PyTorch deterministic mode; this potentially requires either the environment variable CUDA_LAUNCH_BLOCKING or CUBLAS_WORKSPACE_CONFIG to be set, depending on the CUDA version, so we set them both here. It also enables cuDNN deterministic mode.

set_seed: Helper function for reproducible behavior to set the seed in random, numpy, torch and/or tf (if installed).
    Args:
        seed (int): The seed to set.
    Safe to call this function even if CUDA is not available.

neftune_post_forward_hook: Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding layers. This method is slightly adapted from the original source code that can be found here: https://github.com/neelsjain/NEFTune. Simply add it to your model as follows:
        model = ...
        model.embed_tokens.neftune_noise_alpha = 0.1
        model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
    Args:
        module (torch.nn.Module): The embedding module where the hook is attached. Note that you need to set module.neftune_noise_alpha to the desired noise alpha value.
        input (torch.Tensor): The input tensor to the model.
        output (torch.Tensor): The output tensor of the model (i.e. the embeddings).

EvalPrediction: Evaluation output (always contains labels), to be used to compute metrics.
    Parameters:
        predictions (np.ndarray): Predictions of the model.
        label_ids (np.ndarray): Targets to be matched.
        inputs (np.ndarray, optional).

BestRun: The best run found by a hyperparameter search (see Trainer.hyperparameter_search).
    Parameters:
        run_id (str): The id of the best run. If models were saved, the corresponding checkpoint will be in the folder ending with run-{run_id}.
        objective (float): The objective that was obtained for this run.
        hyperparameters (Dict[str, Any]): The hyperparameters picked to get this run.
        run_summary (Optional[Any]): A summary of tuning experiments (a ray.tune.ExperimentAnalysis object for the Ray backend).

default_compute_objective: The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no metrics are provided to the Trainer, the sum of all metrics otherwise (speed metrics are removed first).
    Args:
        metrics (Dict[str, float]): The metrics returned by the evaluate method.
    Return:
        float: The objective to minimize or maximize.

is_main_process: Whether or not the current process is the local process, based on xm.get_ordinal() (for TPUs) first, then on local_rank.

total_processes_number: Returns the number of processes launched in parallel. Works with torch.distributed and TPUs.

speed_metrics: Measures and returns speed performance metrics. This function requires a time snapshot start_time before the operation to be measured starts, and should be run immediately after the operation to be measured has completed.
    Args:
        split: name to prefix metrics (like train, eval, test...)
        start_time: operation start time
        num_samples: number of samples processed
        num_tokens: number of tokens processed

TrainerMemoryTracker: A helper class that tracks CPU and GPU memory. This class will silently skip unless psutil is available; install with pip install psutil. When a stage completes, it can pass a metrics dict to update with the memory metrics gathered during this stage. Example:

        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        # code ...
        metrics = {"train_runtime": 10.5}
        self._memory_tracker.stop_and_update_metrics(metrics)

    At the moment GPU tracking is only for PyTorch, but can be extended to support TensorFlow. To understand this class' intricacies please read the documentation of Trainer.log_metrics(). Implementation notes: it maps trainer methods to a metrics prefix, has a soft dependency on psutil, and derives the stage/caller name automatically. cpu_mem_used returns the resident set size memory for the current process; the peak monitor cannot sleep or it will not catch the peak right (this comment is here on purpose). start() begins tracking for the caller's stage; nested calls of eval during train are simply ignored. stop() stops tracking for the passed stage, sends a signal to peak_monitor_func to complete its loop, and first ensures all objects get collected and their memory is freed. Concepts: alloc_delta is the difference of allocated memory between the end and the start; peaked_delta is the difference between the peak memory and the current memory; in order to know how much memory the measured code consumed, one needs to sum these two. update_metrics() updates the metrics; since we don't have a way to return init metrics, we push them into the first of train/val/predict; since memory can be allocated before init, and it might be difficult to track overall memory usage (in particular for GPU), memory usage at the point init was called is reported, and any additional memory allocations between init and the next stage could also be reported if needed. stop_and_update_metrics() combines stop and metrics update in one call for simpler code; init doesn't have metrics to update, so that data is just saved for later stages to retrieve.

Other helpers: one checks if the dataset implements __len__() and doesn't raise an error (TypeError: len() of unsized object); one recursively calls .item() on the elements of the dictionary passed; one returns the number of arguments of the passed function, even if it's a partial function. A basic decorator will try to execute function and, if it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to function again; function must take in a batch_size parameter as its first argument.
    Args:
        function (callable, optional): A function to wrap.
        starting_batch_size (int, optional): The batch size to try and fit into memory.
        auto_find_batch_size (bool, optional): If False, will just execute function.

Finally, a collator wrapper wraps the data collator to remove unused columns before they are passed to the collator, logging once which ignored columns don't have a corresponding argument in the model's forward (and noting that the message can safely be ignored if those columns are not expected by forward).
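As a rough illustration of how these helpers compose (a sketch, not from the source file), the snippet below seeds a run, computes metrics from an EvalPrediction, and attaches speed metrics; the logits and labels are random stand-ins and the compute_metrics callback is a hypothetical example.

import time
import numpy as np
from transformers.trainer_utils import EvalPrediction, set_seed, speed_metrics

set_seed(42)  # seeds python's random, numpy and torch/tf when they are installed

def compute_metrics(p: EvalPrediction):
    # Hypothetical metric callback of the shape the Trainer expects.
    preds = np.argmax(p.predictions, axis=-1)
    return {"accuracy": float((preds == p.label_ids).mean())}

start = time.time()
fake_logits = np.random.rand(100, 2)        # stand-in for model outputs
fake_labels = np.random.randint(0, 2, 100)  # stand-in for gold labels
metrics = compute_metrics(EvalPrediction(predictions=fake_logits, label_ids=fake_labels))
metrics.update(speed_metrics("eval", start, num_samples=100))
print(metrics)  # e.g. accuracy, eval_runtime, eval_samples_per_second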
import copy import functools import gc import inspect import os import random import re import threading import time from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np from .utils import ( ExplicitEnum, is_psutil_available, is_tf_available, is_torch_available, is_torch_cuda_available, is_torch_mps_available, is_torch_npu_available, is_torch_tpu_available, is_torch_xpu_available, requires_backends, ) if is_torch_available(): import torch def seed_worker(_): worker_seed = torch.initial_seed() % 2**32 set_seed(worker_seed) def enable_full_determinism(seed: int, warn_only: bool = False): set_seed(seed) if is_torch_available(): os.environ["CUDA_LAUNCH_BLOCKING"] = "1" os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" torch.use_deterministic_algorithms(True, warn_only=warn_only) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False if is_tf_available(): import tensorflow as tf tf.config.experimental.enable_op_determinism() def set_seed(seed: int): random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) if is_torch_npu_available(): torch.npu.manual_seed_all(seed) if is_torch_xpu_available(): torch.xpu.manual_seed_all(seed) if is_tf_available(): import tensorflow as tf tf.random.set_seed(seed) def neftune_post_forward_hook(module, input, output): if module.training: dims = torch.tensor(output.size(1) * output.size(2)) mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) return output class EvalPrediction: def __init__( self, predictions: Union[np.ndarray, Tuple[np.ndarray]], label_ids: Union[np.ndarray, Tuple[np.ndarray]], inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None, ): self.predictions = predictions self.label_ids = label_ids self.inputs = inputs def __iter__(self): if self.inputs is not None: return iter((self.predictions, self.label_ids, self.inputs)) else: return iter((self.predictions, self.label_ids)) def __getitem__(self, idx): if idx < 0 or idx > 2: raise IndexError("tuple index out of range") if idx == 2 and self.inputs is None: raise IndexError("tuple index out of range") if idx == 0: return self.predictions elif idx == 1: return self.label_ids elif idx == 2: return self.inputs class EvalLoopOutput(NamedTuple): predictions: Union[np.ndarray, Tuple[np.ndarray]] label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] metrics: Optional[Dict[str, float]] num_samples: Optional[int] class PredictionOutput(NamedTuple): predictions: Union[np.ndarray, Tuple[np.ndarray]] label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]] metrics: Optional[Dict[str, float]] class TrainOutput(NamedTuple): global_step: int training_loss: float metrics: Dict[str, float] PREFIX_CHECKPOINT_DIR = "checkpoint" _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$") def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) class IntervalStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class EvaluationStrategy(ExplicitEnum): NO = "no" STEPS = "steps" EPOCH = "epoch" class HubStrategy(ExplicitEnum): END = "end" EVERY_SAVE = "every_save" CHECKPOINT = "checkpoint" ALL_CHECKPOINTS = "all_checkpoints" 
class BestRun(NamedTuple): run_id: str objective: Union[float, List[float]] hyperparameters: Dict[str, Any] run_summary: Optional[Any] = None def default_compute_objective(metrics: Dict[str, float]) -> float: metrics = copy.deepcopy(metrics) loss = metrics.pop("eval_loss", None) _ = metrics.pop("epoch", None) speed_metrics = [ m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second") or m.endswith("_compilation_time") ] for sm in speed_metrics: _ = metrics.pop(sm, None) return loss if len(metrics) == 0 else sum(metrics.values()) def default_hp_space_optuna(trial) -> Dict[str, float]: from .integrations import is_optuna_available assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`" return { "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5), "seed": trial.suggest_int("seed", 1, 40), "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]), } def default_hp_space_ray(trial) -> Dict[str, float]: from .integrations import is_ray_tune_available assert is_ray_tune_available(), "This function needs ray installed: `pip install ray[tune]`" from ray import tune return { "learning_rate": tune.loguniform(1e-6, 1e-4), "num_train_epochs": tune.choice(list(range(1, 6))), "seed": tune.uniform(1, 40), "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]), } def default_hp_space_sigopt(trial): return [ {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"}, {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"}, {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"}, { "categorical_values": ["4", "8", "16", "32", "64"], "name": "per_device_train_batch_size", "type": "categorical", }, ] def default_hp_space_wandb(trial) -> Dict[str, float]: from .integrations import is_wandb_available if not is_wandb_available(): raise ImportError("This function needs wandb installed: `pip install wandb`") return { "method": "random", "metric": {"name": "objective", "goal": "minimize"}, "parameters": { "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6}, "seed": {"distribution": "int_uniform", "min": 1, "max": 40}, "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]}, }, } class HPSearchBackend(ExplicitEnum): OPTUNA = "optuna" RAY = "ray" SIGOPT = "sigopt" WANDB = "wandb" def is_main_process(local_rank): if is_torch_tpu_available(check_device=True): import torch_xla.core.xla_model as xm return xm.get_ordinal() == 0 return local_rank in [-1, 0] def total_processes_number(local_rank): if is_torch_tpu_available(check_device=True): import torch_xla.core.xla_model as xm return xm.xrt_world_size() elif local_rank != -1 and is_torch_available(): import torch return torch.distributed.get_world_size() return 1 def speed_metrics(split, start_time, num_samples=None, num_steps=None, num_tokens=None): runtime = time.time() - start_time result = {f"{split}_runtime": round(runtime, 4)} if runtime == 0: return result if num_samples is not None: samples_per_second = num_samples / runtime result[f"{split}_samples_per_second"] = round(samples_per_second, 3) if num_steps is not None: steps_per_second = num_steps / runtime result[f"{split}_steps_per_second"] = round(steps_per_second, 3) if num_tokens is not None: tokens_per_second = num_tokens / runtime 
result[f"{split}_tokens_per_second"] = round(tokens_per_second, 3) return result class SchedulerType(ExplicitEnum): LINEAR = "linear" COSINE = "cosine" COSINE_WITH_RESTARTS = "cosine_with_restarts" POLYNOMIAL = "polynomial" CONSTANT = "constant" CONSTANT_WITH_WARMUP = "constant_with_warmup" INVERSE_SQRT = "inverse_sqrt" REDUCE_ON_PLATEAU = "reduce_lr_on_plateau" class TrainerMemoryTracker: stages = { "__init__": "init", "train": "train", "_inner_training_loop": "train", "evaluate": "eval", "predict": "test", } def __init__(self, skip_memory_metrics=False): self.skip_memory_metrics = skip_memory_metrics if not is_psutil_available(): self.skip_memory_metrics = True if self.skip_memory_metrics: return import psutil if is_torch_cuda_available(): import torch self.torch = torch self.gpu = {} elif is_torch_mps_available(): import torch self.torch = torch self.gpu = {} elif is_torch_xpu_available(): import torch self.torch = torch self.gpu = {} elif is_torch_npu_available(): import torch self.torch = torch self.gpu = {} else: self.torch = None self.process = psutil.Process() self.cur_stage = None self.cpu = {} self.init_reported = False def derive_stage(self): caller = inspect.currentframe().f_back.f_back.f_code.co_name if caller in self.stages: return self.stages[caller] else: raise ValueError( f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}" ) def cpu_mem_used(self): return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_mem_used_peak = -1 while True: self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak) if not self.peak_monitoring: break def start(self): if self.skip_memory_metrics: return stage = self.derive_stage() if self.cur_stage is not None and self.cur_stage != stage: return self.cur_stage = stage gc.collect() if self.torch is not None: if torch.cuda.is_available(): self.torch.cuda.reset_peak_memory_stats() self.torch.cuda.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.reset_peak_memory_stats() self.torch.xpu.empty_cache() elif is_torch_npu_available(): self.torch.npu.reset_peak_memory_stats() self.torch.npu.empty_cache() if self.torch is not None: if torch.cuda.is_available(): self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_at_start = self.torch.xpu.memory_allocated() elif is_torch_npu_available(): self.gpu_mem_used_at_start = self.torch.npu.memory_allocated() self.cpu_mem_used_at_start = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() def stop(self, stage): if self.cur_stage is not None and self.cur_stage != stage: return self.peak_monitoring = False gc.collect() if self.torch is not None: if torch.cuda.is_available(): self.torch.cuda.empty_cache() elif is_torch_xpu_available(): self.torch.xpu.empty_cache() elif is_torch_npu_available(): self.torch.npu.empty_cache() if self.torch is not None: if torch.cuda.is_available(): self.gpu_mem_used_now = self.torch.cuda.memory_allocated() self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated() elif is_torch_xpu_available(): self.gpu_mem_used_now = self.torch.xpu.memory_allocated() self.gpu_mem_used_peak = self.torch.xpu.max_memory_allocated() elif is_torch_npu_available(): self.gpu_mem_used_now = self.torch.npu.memory_allocated() self.gpu_mem_used_peak = self.torch.npu.max_memory_allocated() else: raise ValueError("No available GPU device found!") 
self.gpu[self.cur_stage] = { "begin": self.gpu_mem_used_at_start, "end": self.gpu_mem_used_now, "alloc": (self.gpu_mem_used_now - self.gpu_mem_used_at_start), "peaked": max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now), } self.cpu_mem_used_now = self.cpu_mem_used() self.cpu[self.cur_stage] = { "begin": self.cpu_mem_used_at_start, "end": self.cpu_mem_used_now, "alloc": (self.cpu_mem_used_now - self.cpu_mem_used_at_start), "peaked": max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now), } self.cur_stage = None def update_metrics(self, stage, metrics): if self.skip_memory_metrics: return if self.cur_stage is not None and self.cur_stage != stage: return stages = [stage] if not self.init_reported: stages.insert(0, "init") self.init_reported = True for stage in stages: for t in ["alloc", "peaked"]: if stage in self.cpu and t in self.cpu[stage]: metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t] if self.torch is not None and stage in self.gpu and t in self.gpu[stage]: metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] if stages[0] == "init": metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"] if self.torch is not None: metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"] def stop_and_update_metrics(self, metrics=None): if self.skip_memory_metrics: return stage = self.derive_stage() self.stop(stage) if metrics is not None: self.update_metrics(stage, metrics) def has_length(dataset): try: return len(dataset) is not None except TypeError: return False def denumpify_detensorize(metrics): if isinstance(metrics, (list, tuple)): return type(metrics)(denumpify_detensorize(m) for m in metrics) elif isinstance(metrics, dict): return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()}) elif isinstance(metrics, np.generic): return metrics.item() elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1: return metrics.item() return metrics def number_of_arguments(func): if isinstance(func, functools.partial): total_args = len(inspect.signature(func.func).parameters) return total_args - len(func.args) - len(func.keywords) return len(inspect.signature(func).parameters) def find_executable_batch_size( function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False ): if function is None: return functools.partial( find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size, ) if auto_find_batch_size: requires_backends(find_executable_batch_size, "accelerate") from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size) return functools.partial(function, batch_size=starting_batch_size) class FSDPOption(ExplicitEnum): FULL_SHARD = "full_shard" SHARD_GRAD_OP = "shard_grad_op" NO_SHARD = "no_shard" HYBRID_SHARD = "hybrid_shard" HYBRID_SHARD_ZERO2 = "hybrid_shard_zero2" OFFLOAD = "offload" AUTO_WRAP = "auto_wrap" class RemoveColumnsCollator: def __init__( self, data_collator, signature_columns, logger=None, model_name: Optional[str] = None, description: Optional[str] = None, ): self.data_collator = data_collator self.signature_columns = signature_columns self.logger = logger self.description = description self.model_name = model_name self.message_logged = False def _remove_columns(self, feature: dict) -> dict: if not isinstance(feature, dict): return feature if not self.message_logged and self.logger and 
self.model_name: ignored_columns = list(set(feature.keys()) - set(self.signature_columns)) if len(ignored_columns) > 0: dset_description = "" if self.description is None else f"in the {self.description} set" self.logger.info( f"The following columns {dset_description} don't have a corresponding argument in " f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}." f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, " " you can safely ignore this message." ) self.message_logged = True return {k: v for k, v in feature.items() if k in self.signature_columns} def __call__(self, features: List[dict]): features = [self._remove_columns(feature) for feature in features] return self.data_collator(features)
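A quick usage sketch of two of the helpers defined in the file above, get_last_checkpoint and speed_metrics; the directory names and numbers are made up for illustration, and the import path assumes this module is exposed as transformers.trainer_utils.

import os
import time

from transformers.trainer_utils import get_last_checkpoint, speed_metrics

# Checkpoints are directories named "checkpoint-<step>"; get_last_checkpoint
# returns the one with the highest step number.
os.makedirs("demo_output/checkpoint-500", exist_ok=True)
os.makedirs("demo_output/checkpoint-1000", exist_ok=True)
print(get_last_checkpoint("demo_output"))  # demo_output/checkpoint-1000

# speed_metrics turns a wall-clock duration into the *_runtime,
# *_samples_per_second and *_steps_per_second entries reported during training.
start = time.time()
time.sleep(0.1)  # stand-in for an actual evaluation loop
print(speed_metrics("eval", start, num_samples=32, num_steps=4))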
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license args sortish_sampler bool optional defaults to false whether to use a sortish sampler or not only possible if the underlying datasets are seq2seqdataset for now but will become generally available in the near future it sorts the inputs according to lengths in order to minimize the padding size with a bit of randomness for the training set predict_with_generate bool optional defaults to false whether to use generate to calculate generative metrics rouge bleu generation_max_length int optional the max_length to use on each evaluation loop when predict_with_generate true will default to the max_length value of the model configuration generation_num_beams int optional the num_beams to use on each evaluation loop when predict_with_generate true will default to the num_beams value of the model configuration generation_config str or path or generation generationconfig optional
allows to load a generation generationconfig from the from_pretrained method this can be either a string the model id of a pretrained model configuration hosted inside a model repo on huggingface co valid model ids can be located at the root level like bert base uncased or namespaced under a user or organization name like dbmdz bert base german cased a path to a directory containing a configuration file saved using the generationconfig save_pretrained method e g my_model_directory a generation generationconfig object serializes this instance while replace enum by their values and generationconfig by dictionaries for json serialization support it obfuscates the token values by removing their value filter out fields that are defined as field init false
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings logger = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__) class Seq2SeqTrainingArguments(TrainingArguments): sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."}) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) generation_max_length: Optional[int] = field( default=None, metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) }, ) generation_num_beams: Optional[int] = field( default=None, metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) }, ) generation_config: Optional[Union[str, Path, GenerationConfig]] = field( default=None, metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." }, ) def to_dict(self): d = super().to_dict() for k, v in d.items(): if isinstance(v, GenerationConfig): d[k] = v.to_dict() return d
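A minimal, hypothetical sketch of how the generation-specific fields defined above are typically set; the output directory and values are placeholders, not taken from the source.

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="demo_seq2seq",      # placeholder path
    predict_with_generate=True,     # evaluation loops call generate()
    generation_max_length=128,      # overrides the model config's max_length
    generation_num_beams=4,         # overrides the model config's num_beams
)
# to_dict() (overridden above) serializes any GenerationConfig value to a plain dict.
print(args.to_dict()["predict_with_generate"])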
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license trainingarguments is the subset of the arguments we use in our example scripts which relate to the training loop itself using hfargumentparser we can turn this class into argparse https docs python org 3 library argparse module argparse arguments that can be specified on the command line parameters output_dir str the output directory where the model predictions and checkpoints will be written overwrite_output_dir bool optional defaults to false if true overwrite the content of the output directory use this to continue training if output_dir points to a checkpoint directory do_train bool optional defaults to false whether to run training or not this argument is not directly used by trainer it s intended to be used by your training evaluation scripts instead see the example scripts https github com huggingface transformers tree main examples for more details do_eval bool optional whether to run evaluation on the validation set or not will be set to true if evaluation_strategy is different from no this argument is not directly used by trainer it s intended to be used by your training evaluation scripts instead see the example scripts https github com huggingface transformers tree main examples for more details do_predict bool optional defaults to false whether to run predictions on the test set or not this argument is not directly used by trainer it s intended to be used by your training evaluation scripts instead see the example scripts https github com huggingface transformers tree main examples for more details evaluation_strategy str or trainer_utils intervalstrategy optional defaults to no the evaluation strategy to adopt during training possible values are no no evaluation is done during training steps evaluation is done and logged every eval_steps epoch evaluation is done at the end of each epoch per_device_train_batch_size int optional defaults to 8 the batch size per gpu tpu core cpu for training per_device_eval_batch_size int optional defaults to 8 the batch size per gpu tpu core cpu for evaluation gradient_accumulation_steps int optional defaults to 1 number of updates steps to accumulate the gradients for before performing a backward update pass tip warning true when using gradient accumulation one step is counted as one step with backward pass therefore logging evaluation save will be conducted every gradient_accumulation_steps xxx_step training examples tip learning_rate float optional defaults to 5e 5 the initial learning rate for adam
weight_decay float optional defaults to 0 the weight decay to apply if not zero adam_beta1 float optional defaults to 0 9 the beta1 hyperparameter for the adam optimizer adam_beta2 float optional defaults to 0 999 the beta2 hyperparameter for the adam optimizer adam_epsilon float optional defaults to 1e 8 the epsilon hyperparameter for the adam optimizer max_grad_norm float optional defaults to 1 0 maximum gradient norm for gradient clipping num_train_epochs float optional defaults to 3 0 total number of training epochs to perform max_steps int optional defaults to 1 if set to a positive number the total number of training steps to perform overrides num_train_epochs for a finite dataset training is reiterated through the dataset if all data is exhausted until max_steps is reached warmup_ratio float optional defaults to 0 0 ratio of total training steps used for a linear warmup from 0 to learning_rate warmup_steps int optional defaults to 0 number of steps used for a linear warmup from 0 to learning_rate overrides any effect of warmup_ratio logging_dir str optional tensorboard https www tensorflow org tensorboard log directory will default to runs current_datetime_hostname logging_strategy str or trainer_utils intervalstrategy optional defaults to steps the logging strategy to adopt during training possible values are no no logging is done during training epoch logging is done at the end of each epoch steps logging is done every logging_steps logging_first_step bool optional defaults to false whether to log and evaluate the first global_step or not logging_steps int optional defaults to 500 number of update steps between two logs if logging_strategy steps save_strategy str or trainer_utils intervalstrategy optional defaults to steps the checkpoint save strategy to adopt during training possible values are no no save is done during training epoch save is done at the end of each epoch steps save is done every save_steps save_steps int optional defaults to 500 number of updates steps before two checkpoint saves if save_strategy steps save_total_limit int optional if a value is passed will limit the total amount of checkpoints deletes the older checkpoints in output_dir no_cuda bool optional defaults to false whether to not use cuda even when it is available or not seed int optional defaults to 42 random seed that will be set at the beginning of training fp16 bool optional defaults to false whether to use 16 bit mixed precision training through nvidia apex instead of 32 bit training fp16_opt_level str optional defaults to o1 for fp16 training apex amp optimization level selected in o0 o1 o2 and o3 see details on the apex documentation https nvidia github io apex amp local_rank int optional defaults to 1 during distributed training the rank of the process tpu_num_cores int optional when training on tpu the number of tpu cores automatically passed by launcher script debug bool optional defaults to false whether to activate the trace to record computation graphs and profiling information or not dataloader_drop_last bool optional defaults to false whether to drop the last incomplete batch if the length of the dataset is not divisible by the batch size or not eval_steps int optional defaults to 1000 number of update steps before two evaluations past_index int optional defaults to 1 some models like transformerxl model_doc transformerxl or doc xlnet model_doc xlnet can make use of the past hidden states for their predictions if this argument is set to a positive int the trainer will use the 
corresponding output usually index 2 as the past state and feed it to the model at the next training step under the keyword argument mems tpu_name str optional the name of the tpu the process is running on tpu_zone str optional the zone of the tpu the process is running on if not specified we will attempt to automatically detect from metadata gcp_project str optional google cloud project name for the cloud tpu enabled project if not specified we will attempt to automatically detect from metadata run_name str optional a descriptor for the run notably used for wandb logging xla bool optional whether to activate the xla compilation or not set to float16 at first set to bfloat16 in case of tpu if you only want to use a specific subset of gpus use cuda_visible_devices 0 the strategy used for distributed training the number of replicas cpus gpus or tpu cores used in this training whether or not the current process should produce log tf logging is handled by keras not the trainer the actual batch size for training may differ from per_gpu_train_batch_size in distributed training the actual batch size for evaluation may differ from per_gpu_eval_batch_size in distributed training the number of replicas cpus gpus or tpu cores used in this training
import warnings from dataclasses import dataclass, field from typing import Optional, Tuple from .training_args import TrainingArguments from .utils import cached_property, is_tf_available, logging, requires_backends logger = logging.get_logger(__name__) if is_tf_available(): import tensorflow as tf @dataclass class TFTrainingArguments(TrainingArguments): framework = "tf" tpu_name: Optional[str] = field( default=None, metadata={"help": "Name of TPU"}, ) tpu_zone: Optional[str] = field( default=None, metadata={"help": "Zone of TPU"}, ) gcp_project: Optional[str] = field( default=None, metadata={"help": "Name of Cloud TPU-enabled project"}, ) poly_power: float = field( default=1.0, metadata={"help": "Power for the Polynomial decay LR scheduler."}, ) xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"}) @cached_property def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]: requires_backends(self, ["tf"]) logger.info("Tensorflow: setting up strategy") gpus = tf.config.list_physical_devices("GPU") if self.fp16: tf.keras.mixed_precision.set_global_policy("mixed_float16") if self.no_cuda: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") else: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( self.tpu_name, zone=self.tpu_zone, project=self.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: if self.tpu_name: raise RuntimeError(f"Couldn't connect to TPU {self.tpu_name}!") else: tpu = None if tpu: if self.fp16: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16") tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.TPUStrategy(tpu) elif len(gpus) == 0: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") elif len(gpus) == 1: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") elif len(gpus) > 1: strategy = tf.distribute.MirroredStrategy() else: raise ValueError("Cannot find the proper strategy, please check your environment properties.") return strategy @property def strategy(self) -> "tf.distribute.Strategy": requires_backends(self, ["tf"]) return self._setup_strategy @property def n_replicas(self) -> int: requires_backends(self, ["tf"]) return self._setup_strategy.num_replicas_in_sync @property def should_log(self): return False @property def train_batch_size(self) -> int: if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size return per_device_batch_size * self.n_replicas @property def eval_batch_size(self) -> int: if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size return per_device_batch_size * self.n_replicas @property def n_gpu(self) -> int: requires_backends(self, ["tf"]) warnings.warn( "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.", FutureWarning, ) return self._setup_strategy.num_replicas_in_sync
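A hedged sketch of how the properties defined above are usually consumed when training with Keras; the output directory and toy model are placeholders, not from the source.

import tensorflow as tf
from transformers import TFTrainingArguments

args = TFTrainingArguments(output_dir="demo_tf", per_device_train_batch_size=16)
# train_batch_size multiplies the per-device size by the number of replicas.
print(args.n_replicas, args.train_batch_size)
# Model building and compilation normally happen inside the selected strategy scope.
with args.strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(2)])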
coding utf 8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license
for the specific language governing permissions and limitations under the license collection of utils to be used by backbones and their components verify that out_indices and out_features are valid for the given stage_names finds the corresponding out_features and out_indices for the given stage_names the logic is as follows out_features not set out_indices set out_features is set to the out_features corresponding to the out_indices out_indices not set out_features set out_indices is set to the out_indices corresponding to the out_features out_indices and out_features not set out_indices and out_features are set to the last stage out_indices and out_features set input out_indices and out_features are returned args out_features list str the names of the features for the backbone to output out_indices list int or tuple int the indices of the features for the backbone to output stage_names list str the names of the stages of the backbone get the out_features and out_indices so that they are aligned the logic is as follows out_features not set out_indices set out_features is set to the out_features corresponding to the out_indices out_indices not set out_features set out_indices is set to the out_indices corresponding to the out_features out_indices and out_features not set out_indices and out_features are set to the last stage out_indices and out_features set they are verified to be aligned args out_features list str the names of the features for the backbone to output out_indices list int or tuple int the indices of the features for the backbone to output stage_names list str the names of the stages of the backbone first verify that the out_features and out_indices are valid verify that the aligned out_features and out_indices are valid initialize the backbone model from timm the backbone must already be loaded to self _backbone these will diagree with the defaults for the transformers models e g for resnet50 the transformer model has out_features stem stage1 stage2 stage3 stage4 the timm model has out_features act layer1 layer2 layer3 layer4 we verify the out indices and out features are valid number of channels for each stage this is set in the transformer backbone model init method to initialize the backbone this method is called by the constructor of the base class after the pretrained model weights have been loaded set the out_features attribute this will also update the out_indices attribute to match the new out_features set the out_indices attribute this will also update the out_features attribute to match the new out_indices the current backbones will output the number of channels for each stage even if that stage is not in the out_features list serializes this instance to a python dictionary override the default to_dict from pretrainedconfig to include the out_features and out_indices attributes a mixin to support handling the out_features and out_indices attributes for the backbone configurations set the out_features attribute this will also update the out_indices attribute to match the new out_features set the out_indices attribute this will also update the out_features attribute to match the new out_indices serializes this instance to a python dictionary override the default to_dict from pretrainedconfig to include the out_features and out_indices attributes
import enum import inspect from typing import Iterable, List, Optional, Tuple, Union class BackboneType(enum.Enum): TIMM = "timm" TRANSFORMERS = "transformers" def verify_out_features_out_indices( out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]] ): if stage_names is None: raise ValueError("Stage_names must be set for transformers backbones") if out_features is not None: if not isinstance(out_features, (list,)): raise ValueError(f"out_features must be a list {type(out_features)}") if any(feat not in stage_names for feat in out_features): raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}") if out_indices is not None: if not isinstance(out_indices, (list, tuple)): raise ValueError(f"out_indices must be a list or tuple, got {type(out_indices)}") if any(idx >= len(stage_names) for idx in out_indices): raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}") if out_features is not None and out_indices is not None: if len(out_features) != len(out_indices): raise ValueError("out_features and out_indices should have the same length if both are set") if out_features != [stage_names[idx] for idx in out_indices]: raise ValueError("out_features and out_indices should correspond to the same stages if both are set") def _align_output_features_output_indices( out_features: Optional[List[str]], out_indices: Optional[Union[List[int], Tuple[int]]], stage_names: List[str], ): if out_indices is None and out_features is None: out_indices = [len(stage_names) - 1] out_features = [stage_names[-1]] elif out_indices is None and out_features is not None: out_indices = [stage_names.index(layer) for layer in out_features] elif out_features is None and out_indices is not None: out_features = [stage_names[idx] for idx in out_indices] return out_features, out_indices def get_aligned_output_features_output_indices( out_features: Optional[List[str]], out_indices: Optional[Union[List[int], Tuple[int]]], stage_names: List[str], ) -> Tuple[List[str], List[int]]: verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names) output_features, output_indices = _align_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=stage_names ) verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names) return output_features, output_indices class BackboneMixin: backbone_type: Optional[BackboneType] = None def _init_timm_backbone(self, config) -> None: if getattr(self, "_backbone", None) is None: raise ValueError("self._backbone must be set before calling _init_timm_backbone") self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info] self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info] out_indices = self._backbone.feature_info.out_indices out_features = self._backbone.feature_info.module_name() verify_out_features_out_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names ) self._out_features, self._out_indices = out_features, out_indices def _init_transformers_backbone(self, config) -> None: stage_names = getattr(config, "stage_names") out_features = getattr(config, "out_features", None) out_indices = getattr(config, "out_indices", None) self.stage_names = stage_names self._out_features, self._out_indices = get_aligned_output_features_output_indices( 
out_features=out_features, out_indices=out_indices, stage_names=stage_names ) self.num_features = None def _init_backbone(self, config) -> None: self.config = config self.use_timm_backbone = getattr(config, "use_timm_backbone", False) self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS if self.backbone_type == BackboneType.TIMM: self._init_timm_backbone(config) elif self.backbone_type == BackboneType.TRANSFORMERS: self._init_transformers_backbone(config) else: raise ValueError(f"backbone_type {self.backbone_type} not supported.") @property def out_features(self): return self._out_features @out_features.setter def out_features(self, out_features: List[str]): self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=None, stage_names=self.stage_names ) @property def out_indices(self): return self._out_indices @out_indices.setter def out_indices(self, out_indices: Union[Tuple[int], List[int]]): self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=None, out_indices=out_indices, stage_names=self.stage_names ) @property def out_feature_channels(self): return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)} @property def channels(self): return [self.out_feature_channels[name] for name in self.out_features] def forward_with_filtered_kwargs(self, *args, **kwargs): signature = dict(inspect.signature(self.forward).parameters) filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature} return self(*args, **filtered_kwargs) def forward( self, pixel_values, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ): raise NotImplementedError("This method should be implemented by the derived class.") def to_dict(self): output = super().to_dict() output["out_features"] = output.pop("_out_features") output["out_indices"] = output.pop("_out_indices") return output class BackboneConfigMixin: @property def out_features(self): return self._out_features @out_features.setter def out_features(self, out_features: List[str]): self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=None, stage_names=self.stage_names ) @property def out_indices(self): return self._out_indices @out_indices.setter def out_indices(self, out_indices: Union[Tuple[int], List[int]]): self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=None, out_indices=out_indices, stage_names=self.stage_names ) def to_dict(self): output = super().to_dict() output["out_features"] = output.pop("_out_features") output["out_indices"] = output.pop("_out_indices") return output
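A small sketch of the alignment helpers defined above, using a made-up list of stage names; the import path assumes this module is exposed as transformers.utils.backbone_utils.

from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]

# Neither argument set: both default to the last stage.
print(get_aligned_output_features_output_indices(None, None, stage_names))
# (['stage4'], [4])

# Only out_features set: the matching indices are derived from stage_names.
print(get_aligned_output_features_output_indices(["stage1", "stage3"], None, stage_names))
# (['stage1', 'stage3'], [1, 3])

# Mismatched inputs fail fast inside verify_out_features_out_indices.
try:
    get_aligned_output_features_output_indices(["stage1"], [2], stage_names)
except ValueError as err:
    print(err)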
2023 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license noqa
import warnings warnings.warn( "transformers.utils.bitsandbytes module is deprecated and will be removed in a future version. Please import bitsandbytes modules directly from transformers.integrations", FutureWarning, ) from ..integrations import ( get_keys_to_not_convert, replace_8bit_linear, replace_with_bnb_linear, set_module_8bit_tensor_to_device, set_module_quantized_tensor_to_device, )
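The module above is only a deprecation shim; per its FutureWarning, new code would import the same helpers from transformers.integrations directly. A minimal sketch of both paths:

# Deprecated path: importing this module emits the FutureWarning defined above.
from transformers.utils import bitsandbytes  # noqa: F401

# Preferred path named in the warning message.
from transformers.integrations import get_keys_to_not_convert, replace_with_bnb_linear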
this file is autogenerated by the command make fix copies do not edit
from ..utils import requires_backends LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv2Model: def __init__(self, *args, **kwargs): requires_backends(self, ["detectron2"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["detectron2"])
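A sketch of how this placeholder behaves in an environment without detectron2: requires_backends raises an ImportError naming the missing backend, so the failure is explicit instead of surfacing later as a confusing error. The snippet assumes detectron2 is not installed.

try:
    LayoutLMv2Model()  # the placeholder class defined above
except ImportError as err:
    print("detectron2" in str(err))  # True when the backend is missing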
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends class Pop2PianoFeatureExtractor(metaclass=DummyObject): _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) class Pop2PianoTokenizer(metaclass=DummyObject): _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"]) class Pop2PianoProcessor(metaclass=DummyObject): _backends = ["essentia", "librosa", "pretty_midi", "scipy", "torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["essentia", "librosa", "pretty_midi", "scipy", "torch"])
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGenerationMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessorList(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopKLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopPLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperTimeStampLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None 
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_MASKED_LM_MAPPING = None FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None FLAX_MODEL_FOR_PRETRAINING_MAPPING = None FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None FLAX_MODEL_MAPPING = None class FlaxAutoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends 
= ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBloomPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModelWithProjection(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxElectraForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2LMHeadModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianMTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForConditionalGeneration(metaclass=DummyObject): 
_backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRegNetPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxResNetPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["flax"]) class FlaxVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForAudioClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
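# The classes above are auto-generated placeholders that stand in for the Flax models whenever
# JAX/Flax is not installed, so that importing the names from transformers still succeeds and only
# fails, with a clear message, when an object is actually used. Below is a minimal, self-contained
# sketch of how such a placeholder mechanism can be built. It is an illustration only, not the real
# implementation in `..utils`; the names `simple_requires_backends`, `SimpleDummyObject`, and the
# example class further down are invented for this sketch.
import importlib.util


def simple_requires_backends(obj, backends):
    # Raise an informative ImportError naming the object and every backend that is not importable.
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backend(s): {', '.join(missing)}")


class SimpleDummyObject(type):
    # Metaclass for placeholders: any non-private attribute access on the class re-checks the
    # backends, so calls like `SomePlaceholder.from_pretrained(...)` fail with the same clear
    # message as direct instantiation would.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        # In the placeholder scenario the backend is missing, so this raises ImportError.
        simple_requires_backends(cls, cls._backends)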
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class Pop2PianoFeatureExtractor(metaclass=DummyObject):
    _backends = ["music"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["music"])


class Pop2PianoTokenizer(metaclass=DummyObject):
    _backends = ["music"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["music"])
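# The "keras_nlp" and "music" placeholder files above follow exactly the same pattern as the Flax
# ones. Continuing the simplified sketch introduced after the Flax placeholders (not library code;
# "example_backend" is a made-up backend name used only for illustration):
class ExamplePlaceholder(metaclass=SimpleDummyObject):
    _backends = ["example_backend"]

    def __init__(self, *args, **kwargs):
        simple_requires_backends(self, ["example_backend"])


try:
    # Defining and importing the placeholder always works; only *using* it raises.
    ExamplePlaceholder()
except ImportError as err:
    print(err)  # ExamplePlaceholder requires the following backend(s): example_backend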
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends class PyTorchBenchmark(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PyTorchBenchmarkArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GlueDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GlueDataTrainingArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineTextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineWithRefDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LineByLineWithSOPTextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SquadDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SquadDataTrainingArguments(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TextDataset(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TextDatasetForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlternatingCodebooksLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeamScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeamSearchScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConstrainedBeamSearchScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Constraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConstraintListState(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DisjunctiveConstraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderNoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EpsilonLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EtaLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ExponentialDecayLengthPenalty(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GenerationMixin(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HammingDiversityLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InfNanRemoveLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitNormalization(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsProcessorList(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaxLengthCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaxTimeCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MinNewTokensLengthLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NoBadWordsLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhrasalConstraint(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PrefixConstrainedLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SequenceBiasLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StoppingCriteria(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class StoppingCriteriaList(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TemperatureLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TopKLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TopPLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TypicalLogitsWarper(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UnbatchedClassifierFreeGuidanceLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperTimeStampLogitsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def top_k_top_p_filtering(*args, **kwargs): requires_backends(top_k_top_p_filtering, ["torch"]) class PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class AlbertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlbertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_albert(*args, **kwargs): requires_backends(load_tf_weights_in_albert, ["torch"]) ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = None class AlignModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AlignVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class AltCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class AltCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class ASTForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ASTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ASTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = None MODEL_FOR_AUDIO_XVECTOR_MAPPING = None MODEL_FOR_BACKBONE_MAPPING = None MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None MODEL_FOR_CAUSAL_LM_MAPPING = None MODEL_FOR_CTC_MAPPING = None MODEL_FOR_DEPTH_ESTIMATION_MAPPING = None MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = None MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = None MODEL_FOR_MASK_GENERATION_MAPPING = None MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None MODEL_FOR_MASKED_LM_MAPPING = None MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None MODEL_FOR_OBJECT_DETECTION_MAPPING = None MODEL_FOR_PRETRAINING_MAPPING = None MODEL_FOR_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_TEXT_ENCODING_MAPPING = None MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = None MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = None MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = None MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = None MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = None MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None MODEL_FOR_VISION_2_SEQ_MAPPING = None MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None MODEL_MAPPING = None MODEL_WITH_LM_HEAD_MAPPING = None class AutoBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForAudioXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForImageToImage(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMaskGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTableQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextEncoding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextToSpectrogram(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTextToWaveform(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class AutoModelForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVision2Seq(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForVisualQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForZeroShotImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelForZeroShotObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoModelWithLMHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class AutoformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AutoformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BARK_PRETRAINED_MODEL_ARCHIVE_LIST = None class BarkCausalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkCoarseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkFineModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BarkSemanticModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BART_PRETRAINED_MODEL_ARCHIVE_LIST = None class BartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BartPretrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PretrainedBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BeitBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BeitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_bert(*args, **kwargs): requires_backends(load_tf_weights_in_bert, ["torch"]) class BertGenerationDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertGenerationEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BertGenerationPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_bert_generation(*args, **kwargs): requires_backends(load_tf_weights_in_bert_generation, ["torch"]) BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None class BigBirdForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
BigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_big_bird(*args, **kwargs): requires_backends(load_tf_weights_in_big_bird, ["torch"]) BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None class BigBirdPegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BigBirdPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BioGptForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BioGptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BitBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BitPreTrainedModel(metaclass=DummyObject): 
_backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlenderbotForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlenderbotSmallForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class BlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipForImageTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BlipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Blip2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2QFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Blip2VisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = None class BloomForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class BloomForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BloomPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST = None class BridgeTowerForContrastiveLearning(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BridgeTowerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) BROS_PRETRAINED_MODEL_ARCHIVE_LIST = None class BrosForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosSpadeEEForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class BrosSpadeELForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CamembertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CamembertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, 
["torch"]) CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None class CanineForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CanineModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CaninePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_canine(*args, **kwargs): requires_backends(load_tf_weights_in_canine, ["torch"]) CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ChineseCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ChineseCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ClapAudioModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapAudioModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapFeatureExtractor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClapTextModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class CLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPTextModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPVisionModelWithProjection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST = None class CLIPSegForImageSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CLIPSegVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CLVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class ClvpDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpModelForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ClvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None class CodeGenForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CodeGenModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CodeGenPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConditionalDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrForSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConditionalDetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
ConvBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_convbert(*args, **kwargs): requires_backends(load_tf_weights_in_convbert, ["torch"]) CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvNextBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConvNextV2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ConvNextV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CpmAntForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CpmAntModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CpmAntPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None class CTRLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CTRLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class CvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class CvtModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
CvtPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST = None DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST = None class Data2VecAudioForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecAudioPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecTextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Data2VecVisionPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class DebertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
DebertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class DebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DebertaV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class DecisionTransformerGPT2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DecisionTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeformableDetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeformableDetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeformableDetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeiTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DeiTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST = None 
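# Illustrative sketch, not part of the generated module: instead of relying on
# these fallbacks, calling code can test for the backend explicitly.
# is_torch_available() is the helper transformers uses for this check; the guard
# below is an assumed usage pattern for an environment where torch may be absent:
from transformers.utils import is_torch_available

if is_torch_available():
    from transformers import DeiTForImageClassification  # real torch-backed class
else:
    print("torch is not installed; torch models resolve to the dummy objects above")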
class MCTCTForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MCTCTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MCTCTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MMBTForClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MMBTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ModalEmbeddings(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenLlamaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RetriBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RetriBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TrajectoryTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TrajectoryTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class AdaptiveEmbedding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TransfoXLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_transfo_xl(*args, **kwargs): requires_backends(load_tf_weights_in_transfo_xl, ["torch"]) VAN_PRETRAINED_MODEL_ARCHIVE_LIST = None class VanForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VanModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VanPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DETA_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
DetaForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetrForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrForSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DetrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DINAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DinatBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DinatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DINOV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Dinov2Backbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Dinov2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DistilBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class DonutSwinModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, 
["torch"]) class DonutSwinPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None class DPRContextEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedContextEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRPretrainedReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRQuestionEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPRReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) DPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class DPTForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class DPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class EfficientFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class EfficientNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EfficientNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class ElectraForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ElectraPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_electra(*args, **kwargs): requires_backends(load_tf_weights_in_electra, ["torch"]) ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST = None class EncodecModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncodecPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None class ErnieForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErniePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ERNIE_M_PRETRAINED_MODEL_ARCHIVE_LIST = None class ErnieMForInformationExtraction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
ErnieMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ErnieMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None class EsmFoldPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForProteinFolding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class EsmPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FALCON_PRETRAINED_MODEL_ARCHIVE_LIST = None class FalconForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FalconPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
FlaubertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlaubertWithLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlavaForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaImageCodebook(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaImageModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaMultimodalModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FlavaTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class FNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class FocalNetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FocalNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) class FSMTForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FSMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PretrainedFSMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None class FunnelBaseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FunnelPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_funnel(*args, **kwargs): requires_backends(load_tf_weights_in_funnel, ["torch"]) class FuyuForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class FuyuPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class GitForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GitVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GLPN_PRETRAINED_MODEL_ARCHIVE_LIST = None class GLPNForDepthEstimation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GLPNModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GLPNPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] 
def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2LMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPT2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_gpt2(*args, **kwargs): requires_backends(load_tf_weights_in_gpt2, ["torch"]) GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTBigCodeForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTBigCodePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_gpt_neo(*args, **kwargs): requires_backends(load_tf_weights_in_gpt_neo, ["torch"]) GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoXForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
GPTNeoXModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapaneseLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTJForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTJPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GPTSAN_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None class GPTSanJapaneseForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTSanJapaneseModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GPTSanJapanesePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class GraphormerForGraphClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GraphormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GraphormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class GroupViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroupViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroupViTTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class GroupViTVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class HubertForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
HubertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HubertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class HubertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class IBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST = None class IdeficsForVisionText2Text(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IdeficsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IdeficsPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class IdeficsProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ImageGPTForCausalImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ImageGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_imagegpt(*args, **kwargs): requires_backends(load_tf_weights_in_imagegpt, ["torch"]) INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class InformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class InstructBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, 
["torch"]) class InstructBlipPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipQFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class InstructBlipVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST = None class JukeboxModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxPrior(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class JukeboxVQVAE(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) KOSMOS2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Kosmos2ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Kosmos2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Kosmos2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
LayoutLMv3ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3ForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LayoutLMv3PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LED_PRETRAINED_MODEL_ARCHIVE_LIST = None class LEDForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LEDPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class LevitForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LevitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LILT_PRETRAINED_MODEL_ARCHIVE_LIST = None class LiltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LiltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LlamaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class LongformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class LongformerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongformerSelfAttention(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST = None class LongT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LongT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None class LukeForEntityClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForEntityPairClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForEntitySpanClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LukePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertForQuestionAnswering(metaclass=DummyObject): 
_backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertVisualFeatureEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class LxmertXLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None class M2M100ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class M2M100Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class M2M100PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarianMTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class MarkupLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MarkupLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class Mask2FormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Mask2FormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Mask2FormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class MaskFormerForInstanceSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MaskFormerSwinBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForCausalLM(metaclass=DummyObject): 
_backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MBartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MEGA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MegaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MegatronBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MegatronBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST = None class MgpstrForSceneTextRecognition(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MgpstrModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MgpstrPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MistralPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilebert(*args, **kwargs): requires_backends(load_tf_weights_in_mobilebert, ["torch"]) MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileNetV1ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV1Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV1PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilenet_v1(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v1, ["torch"]) MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileNetV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] 
def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2ForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileNetV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_mobilenet_v2(*args, **kwargs): requires_backends(load_tf_weights_in_mobilenet_v2, ["torch"]) MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class MobileViTV2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2ForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MobileViTV2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class MPNetForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MPNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class MptForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForSequenceClassification(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MptPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class MraForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MraPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = None class MusicgenForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MusicgenProcessor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) MVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class MvpForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
MvpForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class MvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class NatBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST = None class NezhaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NezhaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST = None class NllbMoeForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeSparseMLP(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NllbMoeTop2Router(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class NystromformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
NystromformerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class NystromformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ONEFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class OneFormerForUniversalSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OneFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OneFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OpenAIGPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_openai_gpt(*args, **kwargs): requires_backends(load_tf_weights_in_openai_gpt, ["torch"]) OPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OPTForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OPTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OWLV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Owlv2ForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
Owlv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2TextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Owlv2VisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class OwlViTForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class OwlViTVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PATCHTST_PRETRAINED_MODEL_ARCHIVE_LIST = None class PatchTSTForClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTForPretraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTForRegression(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PatchTSTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None class PegasusXForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusXModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PegasusXPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForImageClassificationFourier(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
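# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch): blocks like the ones above are not
# written by hand -- in the Transformers repository they are generated and kept
# in sync by a utility script (utils/check_dummies.py). The generator below is
# a hypothetical, much simplified stand-in showing how such repetitive
# placeholder code can be rendered from a list of public names; it is not the
# project's actual tooling, and its templates are assumptions for illustration.
DUMMY_CONSTANT_TEMPLATE = "\n{name} = None\n"

DUMMY_FUNCTION_TEMPLATE = """

def {name}(*args, **kwargs):
    requires_backends({name}, ["torch"])
"""

DUMMY_CLASS_TEMPLATE = """

class {name}(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""


def render_dummies(names):
    # UPPER_CASE names become `None` constants, lower_case names become stub
    # functions, and everything else becomes a DummyObject-backed class,
    # matching the three shapes that appear throughout this file.
    chunks = []
    for name in names:
        if name.isupper():
            chunks.append(DUMMY_CONSTANT_TEMPLATE.format(name=name))
        elif name.islower():
            chunks.append(DUMMY_FUNCTION_TEMPLATE.format(name=name))
        else:
            chunks.append(DUMMY_CLASS_TEMPLATE.format(name=name))
    return "".join(chunks)


if __name__ == "__main__":
    # Example: render placeholders for one constant, one class and one function.
    print(render_dummies(["OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "OwlViTModel", "load_tf_weights_in_tapas"]))
# ---------------------------------------------------------------------------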
class PerceiverForImageClassificationLearned(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForMultimodalAutoencoding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForOpticalFlow(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PerceiverPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PersimmonPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PHI_PRETRAINED_MODEL_ARCHIVE_LIST = None class PhiForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PhiPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Pix2StructForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pix2StructPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pix2StructTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pix2StructVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PLBART_PRETRAINED_MODEL_ARCHIVE_LIST = None class PLBartForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PLBartForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
PLBartForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PLBartModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PLBartPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class PoolFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PoolFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PoolFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) POP2PIANO_PRETRAINED_MODEL_ARCHIVE_LIST = None class Pop2PianoForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Pop2PianoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class ProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ProphetNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) PVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class PvtForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PvtModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class PvtPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class QDQBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertForTokenClassification(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class QDQBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_qdqbert(*args, **kwargs): requires_backends(load_tf_weights_in_qdqbert, ["torch"]) class RagModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagSequenceForGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RagTokenForGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None class RealmEmbedder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmForOpenQA(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmKnowledgeAugEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmReader(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmRetriever(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RealmScorer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_realm(*args, **kwargs): requires_backends(load_tf_weights_in_realm, ["torch"]) REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class ReformerAttention(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ReformerModelWithLMHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
ReformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class RegNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RegNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RegNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RemBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RemBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_rembert(*args, **kwargs): requires_backends(load_tf_weights_in_rembert, ["torch"]) RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class ResNetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ResNetForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ResNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ResNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class RobertaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
RobertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None class RobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class RoCBertForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoCBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_roc_bert(*args, **kwargs): requires_backends(load_tf_weights_in_roc_bert, ["torch"]) ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class RoFormerForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["torch"]) class RoFormerForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_roformer(*args, **kwargs): requires_backends(load_tf_weights_in_roformer, ["torch"]) RWKV_PRETRAINED_MODEL_ARCHIVE_LIST = None class RwkvForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RwkvModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class RwkvPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None class SamModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SamPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEAMLESS_M4T_PRETRAINED_MODEL_ARCHIVE_LIST = None class SeamlessM4TCodeHifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TForTextToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4THifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4TTextToUnitForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class 
SeamlessM4TTextToUnitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEAMLESS_M4T_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class SeamlessM4Tv2ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2ForTextToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SeamlessM4Tv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SegformerDecodeHead(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SegformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None class SEWForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None class SEWDForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SEWDPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class Speech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2TextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2TextPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2Text2ForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Speech2Text2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST = None class SpeechT5ForSpeechToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5ForSpeechToText(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5ForTextToSpeech(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5HifiGan(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SpeechT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SplinterForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SplinterPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class SqueezeBertForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertModule(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SqueezeBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwiftFormerForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwiftFormerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwiftFormerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwinBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwinPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWIN2SR_PRETRAINED_MODEL_ARCHIVE_LIST = None class Swin2SRForImageSuperResolution(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swin2SRModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swin2SRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Swinv2ForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2ForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Swinv2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) SWITCH_TRANSFORMERS_PRETRAINED_MODEL_ARCHIVE_LIST = None class SwitchTransformersEncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersSparseMLP(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class SwitchTransformersTop1Router(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) T5_PRETRAINED_MODEL_ARCHIVE_LIST = None class T5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["torch"]) class T5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class T5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_t5(*args, **kwargs): requires_backends(load_tf_weights_in_t5, ["torch"]) TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TableTransformerForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TableTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TableTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None class TapasForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TapasPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_tapas(*args, **kwargs): requires_backends(load_tf_weights_in_tapas, ["torch"]) TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TimeSeriesTransformerForPrediction(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimeSeriesTransformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TimesformerForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimesformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimesformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TimmBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None class TrOCRForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class TrOCRPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TVLT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TvltForAudioVisualClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) TVP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TvpForVideoGrounding(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvpModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class TvpPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5EncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UMT5PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None class UniSpeechForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UniSpeechSatPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class UnivNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UperNetForSemanticSegmentation(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class UperNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None class VideoMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VideoMAEPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViltForImageAndTextRetrieval(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForImagesAndTextClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViltPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class VisualBertForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] 
def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertForVisualReasoning(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VisualBertPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTForMaskedImageModeling(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTHybridForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTHybridModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTHybridPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTMAEForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAELayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAEModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None class ViTMSNForImageClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMSNModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class ViTMSNPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITDET_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitDetBackbone(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitDetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitDetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITMATTE_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
VitMatteForImageMatting(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitMattePreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VITS_PRETRAINED_MODEL_ARCHIVE_LIST = None class VitsModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VitsPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class VivitForVideoClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VivitModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class VivitPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2Model(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class Wav2Vec2ConformerForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForPreTraining(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Wav2Vec2ConformerPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
WavLMForAudioFrameClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForCTC(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMForXVector(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WavLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None class WhisperForAudioClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class WhisperPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class XCLIPModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPTextModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XCLIPVisionModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class XGLMForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XGLMModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XGLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMModel(metaclass=DummyObject): _backends = 
["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMWithLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMProphetNetDecoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetEncoder(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetForConditionalGeneration(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMProphetNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLMRobertaXLForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) 
class XLMRobertaXLModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLMRobertaXLPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class XLNetForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetLMHeadModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XLNetPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def load_tf_weights_in_xlnet(*args, **kwargs): requires_backends(load_tf_weights_in_xlnet, ["torch"]) XMOD_PRETRAINED_MODEL_ARCHIVE_LIST = None class XmodForCausalLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class XmodPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST = None class YolosForObjectDetection(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YolosModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YolosPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None class YosoForMaskedLM(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForMultipleChoice(metaclass=DummyObject): _backends = ["torch"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForQuestionAnswering(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoForTokenClassification(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoLayer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class YosoPreTrainedModel(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class Adafactor(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) class AdamW(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def get_constant_schedule(*args, **kwargs): requires_backends(get_constant_schedule, ["torch"]) def get_constant_schedule_with_warmup(*args, **kwargs): requires_backends(get_constant_schedule_with_warmup, ["torch"]) def get_cosine_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_schedule_with_warmup, ["torch"]) def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"]) def get_inverse_sqrt_schedule(*args, **kwargs): requires_backends(get_inverse_sqrt_schedule, ["torch"]) def get_linear_schedule_with_warmup(*args, **kwargs): requires_backends(get_linear_schedule_with_warmup, ["torch"]) def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"]) def get_scheduler(*args, **kwargs): requires_backends(get_scheduler, ["torch"]) class Conv1D(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def apply_chunking_to_forward(*args, **kwargs): requires_backends(apply_chunking_to_forward, ["torch"]) def prune_layer(*args, **kwargs): requires_backends(prune_layer, ["torch"]) class Trainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) def torch_distributed_zero_first(*args, **kwargs): requires_backends(torch_distributed_zero_first, ["torch"]) class Seq2SeqTrainer(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"])
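The placeholders above all follow one pattern: each name stays importable even when PyTorch is absent, and any construction or attribute access goes through `requires_backends`, which raises an error naming the missing extra. Below is a minimal, self-contained sketch of that mechanism; `DummyObject` and `requires_backends` mirror the names used here, but the availability check and the error wording are simplified assumptions, not the library's exact implementation in `transformers.utils`.

```python
# Illustrative sketch of the dummy-object mechanism used throughout this file,
# assuming a simple find_spec-based availability check. The real helpers live in
# transformers.utils; the error wording here is an assumption, not the library's text.
import importlib.util

_AVAILABLE = {"torch": importlib.util.find_spec("torch") is not None}


def requires_backends(obj, backends):
    """Raise an ImportError naming every backend that is required but not installed."""
    missing = [b for b in backends if not _AVAILABLE.get(b, False)]
    if missing:
        name = getattr(obj, "__name__", None) or obj.__class__.__name__
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")


class DummyObject(type):
    """Metaclass: any public attribute access on the placeholder class re-checks the backends."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class T5Model(metaclass=DummyObject):  # shaped like the generated placeholders above
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


# Importing the name always works; only use triggers the error when torch is absent.
if not _AVAILABLE["torch"]:
    try:
        T5Model()
    except ImportError as err:
        print(err)
```

The design keeps `from transformers import ...` cheap and failure-free, deferring the dependency error to the point where the object is actually used.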
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends SLOW_TO_FAST_CONVERTERS = None def convert_slow_tokenizer(*args, **kwargs): requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"])
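The stub above differs from the torch placeholders only in that it guards a module-level function on two extras at once: `requires_backends` is handed the full list, so a single error can name every missing dependency. A hedged, self-contained sketch of that multi-backend case follows; the package names "sentencepiece" and "tokenizers" are their importable module names, and the message text is an assumption.

```python
# Sketch of a module-level stub guarded on two backends at once.
# Assumes both extras are importable as "sentencepiece" and "tokenizers";
# the error message text is illustrative, not the library's exact wording.
import importlib.util


def requires_backends(fn_or_obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = getattr(fn_or_obj, "__name__", type(fn_or_obj).__name__)
        raise ImportError(f"{name} requires the following missing backend(s): {', '.join(missing)}")


def convert_slow_tokenizer(*args, **kwargs):
    # Mirrors the generated stub: fail loudly unless both extras are installed.
    requires_backends(convert_slow_tokenizer, ["sentencepiece", "tokenizers"])
```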
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends class AlbertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BarthezTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BartphoTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BertGenerationTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class BigBirdTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CamembertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CodeLlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class CpmTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class DebertaV2Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ErnieMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class FNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class GPTSw3Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LayoutXLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class LlamaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class M2M100Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MarianTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBart50Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MBartTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MLukeTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class MT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class NllbTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class PegasusTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class PLBartTokenizer(metaclass=DummyObject): _backends = 
["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class ReformerTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class RemBertTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SeamlessM4TTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class Speech2TextTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class SpeechT5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class T5Tokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XGLMTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMProphetNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLMRobertaTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"]) class XLNetTokenizer(metaclass=DummyObject): _backends = ["sentencepiece"] def __init__(self, *args, **kwargs): requires_backends(self, ["sentencepiece"])
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends class ASTFeatureExtractor(metaclass=DummyObject): _backends = ["speech"] def __init__(self, *args, **kwargs): requires_backends(self, ["speech"]) class Speech2TextFeatureExtractor(metaclass=DummyObject): _backends = ["speech"] def __init__(self, *args, **kwargs): requires_backends(self, ["speech"])
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends class TFBertTokenizer(metaclass=DummyObject): _backends = ["tensorflow_text"] def __init__(self, *args, **kwargs): requires_backends(self, ["tensorflow_text"])
this file is autogenerated by the command make fix copies do not edit
from ..utils import DummyObject, requires_backends class TensorFlowBenchmarkArguments(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TensorFlowBenchmark(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFForceTokensLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGenerationMixin(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsProcessorList(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFNoBadWordsLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFNoRepeatNGramLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRepetitionPenaltyLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSuppressTokensAtBeginLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSuppressTokensLogitsProcessor(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTopKLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTopPLogitsWarper(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def tf_top_k_top_p_filtering(*args, **kwargs): requires_backends(tf_top_k_top_p_filtering, ["tf"]) class KerasMetricCallback(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class PushToHubCallback(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSequenceSummary(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSharedEmbeddings(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def shape_list(*args, **kwargs): requires_backends(shape_list, ["tf"]) TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class 
TFAlbertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_CAUSAL_LM_MAPPING = None TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_MASK_GENERATION_MAPPING = None TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = None TF_MODEL_FOR_MASKED_LM_MAPPING = None TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None TF_MODEL_FOR_PRETRAINING_MAPPING = None TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None TF_MODEL_FOR_TEXT_ENCODING_MAPPING = None TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None TF_MODEL_FOR_VISION_2_SEQ_MAPPING = None TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = None TF_MODEL_MAPPING = None TF_MODEL_WITH_LM_HEAD_MAPPING = None class TFAutoModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForAudioClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMaskGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTableQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTextEncoding(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelForZeroShotImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFAutoModelWithLMHead(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBartPretrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFBertEmbeddings(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, 
*args, **kwargs): requires_backends(self, ["tf"]) class TFBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFBlipForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForImageTextRetrieval(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFBlipVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCamembertForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFCamembertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCamembertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCLIPModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCLIPVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFConvBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2ForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFConvNextV2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCTRLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCTRLPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFCvtForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFCvtPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFData2VecVisionPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDebertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDebertaV2ForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2ForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDebertaV2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDeiTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) class TFDeiTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDeiTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFAdaptiveEmbedding(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTransfoXLPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFDPRContextEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedContextEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedQuestionEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRPretrainedReader(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRQuestionEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFDPRReader(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFEfficientFormerForImageClassification(metaclass=DummyObject): 
_backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerForImageClassificationWithTeacher(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEfficientFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFElectraForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFElectraPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFEsmForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFEsmPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFFlaubertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFlaubertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, 
["tf"]) class TFFlaubertWithLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFFunnelBaseModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFFunnelPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFGPT2DoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2LMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2MainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFGroupViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFGroupViTTextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFGroupViTVisionModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFHubertForCTC(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFHubertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLayoutLMForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLEDPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLongformerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFLongformerForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLongformerSelfAttention(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFLxmertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFLxmertVisualFeatureEncoder(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianMTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMarianPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMBartPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFMobileBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFMobileBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFMobileViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMobileViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFMPNetForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMPNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFMT5Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFOpenAIGPTDoubleHeadsModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOpenAIGPTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFOPTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class 
TFOPTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagSequenceForGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRagTokenForGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRegNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRegNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRemBertForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRemBertPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFResNetForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFResNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFRobertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFRoFormerForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFRoFormerLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SAM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSamModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSamPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSegformerDecodeHead(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerForSemanticSegmentation(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSegformerPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSpeech2TextForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSpeech2TextPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFSwinForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinForMaskedImageModeling(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFSwinPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFT5EncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFT5PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFTapasForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) 
class TFTapasForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFTapasPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTForImageClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEForPreTraining(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFViTMAEPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2ForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2Model(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXGLMForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXLMForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["tf"]) class TFXLMForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMWithLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None class TFXLNetForMultipleChoice(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForQuestionAnsweringSimple(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetForTokenClassification(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetLMHeadModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetMainLayer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class TFXLNetPreTrainedModel(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class AdamWeightDecay(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class GradientAccumulator(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) class WarmUp(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) def create_optimizer(*args, 
**kwargs): requires_backends(create_optimizer, ["tf"]) class TFTrainer(metaclass=DummyObject): _backends = ["tf"] def __init__(self, *args, **kwargs): requires_backends(self, ["tf"])
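# --- Illustrative sketch (not part of the generated files) ---------------------
# Every dummy above follows the same pattern: a placeholder class (or function)
# exists so that `from transformers import X` never fails, but using it raises an
# ImportError naming the missing backend. The sketch below reproduces that pattern
# with a hypothetical class name, `TFExampleModel`; `DummyObject` and
# `requires_backends` are imported from `transformers.utils` exactly as in the
# generated code.
from transformers.utils import DummyObject, requires_backends


class TFExampleModel(metaclass=DummyObject):
    # Declares which backend(s) this placeholder stands in for.
    _backends = ["tf"]

    def __init__(self, *args, **kwargs):
        # Raises an ImportError with installation instructions when TensorFlow
        # is not available; otherwise it is effectively a no-op.
        requires_backends(self, ["tf"])


try:
    TFExampleModel()
except ImportError as err:
    # Only reached in an environment where TensorFlow is not installed.
    print(err)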
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends class AlbertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BartTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BarthezTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BigBirdTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BlenderbotTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BlenderbotSmallTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class BloomTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CamembertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CLIPTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CodeLlamaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CodeGenTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class ConvBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class CpmTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DebertaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DebertaV2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RetriBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DistilBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DPRContextEncoderTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DPRQuestionEncoderTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class DPRReaderTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class ElectraTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class FNetTokenizerFast(metaclass=DummyObject): _backends = 
["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class FunnelTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GPT2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GPTNeoXTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class GPTNeoXJapaneseTokenizer(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class HerbertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutLMv2TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutLMv3TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LayoutXLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LEDTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LlamaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LongformerTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class LxmertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MarkupLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MBartTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MBart50TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MobileBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MPNetTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MT5TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class MvpTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class NllbTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class NougatTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class OpenAIGPTTokenizerFast(metaclass=DummyObject): 
_backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class PegasusTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RealmTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class ReformerTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RemBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RobertaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class RoFormerTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class SeamlessM4TTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class SplinterTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class SqueezeBertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class T5TokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class WhisperTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class XGLMTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class XLMRobertaTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class XLNetTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) class PreTrainedTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"])
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends class ImageProcessingMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageFeatureExtractionMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BlipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BridgeTowerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutImageProcessor(metaclass=DummyObject): _backends = 
["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FuyuProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class IdeficsImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Mask2FormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class 
MobileNetV1FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class NougatImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OneFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Owlv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Pix2StructImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PvtImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SamImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Swin2SRImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvpImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["vision"]) class VideoMAEImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTHybridImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VitMatteImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VivitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"])
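All of these placeholder classes follow the same pattern: they stay importable even when the vision backend (Pillow) is missing, and requires_backends raises an informative ImportError only when one of them is actually instantiated. A minimal sketch of the behavior this enables (CLIPImageProcessor is just one example; any of the classes above behaves the same way, and the exact error message is an assumption):

```python
# Sketch: with the vision backend (Pillow) installed this returns the real image
# processor; without it, the dummy placeholder above is imported instead and the
# constructor raises an ImportError explaining which extra dependency to install.
from transformers import CLIPImageProcessor

try:
    processor = CLIPImageProcessor()  # dummy __init__ calls requires_backends(self, ["vision"])
    print(type(processor))
except ImportError as err:
    print(f"Vision backend missing: {err}")
```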
coding=utf-8. Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

XLNet: TODO, add support for them as it should be quite easy to do so (small blocking issues): XLNetForQuestionAnswering. torch.where returns the broadcasted tensor of condition, x and y, so hack it by using addition. TODO: infer shape without performing the computation; this might be quite hard. HFProxy is a proxy that uses metadata to handle data-dependent control flow. The device attribute is a hack so we can track when devices are used during meta-tensor propagation; these values are replaced with a constant "meta". Note: the node for an attribute is not added to the graph yet; if this is a method call, we peephole-optimize to the method invocation, and the node for attributes is added lazily, since most will just be method calls which do not rely on the getitem call. _proxies_to_metas returns the underlying metadata for HFProxies and behaves like the identity for the others.

HFTracer is a tracer able to symbolically trace models from the library; to do that, it uses HFProxy instead of the regular PyTorch torch.fx.Proxy. proxy_buffer_attributes is a feature flag for proxying accesses to buffer values. _generate_dummy_input generates dummy input for model inference recording: the model class is retrieved either from the class_for_deserialization attribute if the model was restored from pickle, or from the __class__ attribute in the general case; if no num_channels is in the config, some arbitrary value is used, and a big sequence length is generated for audio inputs. Note: tensor constructors in PyTorch define the device argument as kwargs-only, which is why patching them works; if you add methods to _TORCH_METHODS_TO_PATCH that do not define device as kwarg-only, this will break, and you will likely see issues where the size of the output cannot be inferred. _module_getattr was replaced by getattr from PyTorch 1.13; the getattr wrapper is needed for PyTorch 1.13+.

trace: traces root and returns the corresponding FX torch.fx.Graph representation. root can either be a torch.nn.Module instance or a Python callable. Note that after this call, self.root may be different from the root passed in here; for example, when a free function is passed to trace(), we will create a torch.nn.Module instance to use as the root and add embedded constants to it. Args: root (torch.nn.Module or Callable): either a torch.nn.Module or a function to be traced through. If root is not a transformers.PreTrainedModel, then dummy_inputs must be passed, otherwise tracing will fail. concrete_args (Dict[str, Any], optional): concrete arguments that should not be treated as proxies. dummy_inputs (Dict[str, Any], optional): the dummy inputs needed to handle data-dependent control flow if root is not a transformers.PreTrainedModel; it can also be used when root is a transformers.PreTrainedModel to specify custom dummy inputs for a subset or all of the model inputs. complete_concrete_args_with_inputs_not_in_dummy_inputs (bool, optional, defaults to True): if True and dummy_inputs is specified, every argument that root can take that is not in dummy_inputs and not in concrete_args will be added to concrete_args; otherwise this does nothing. Returns: torch.fx.Graph, an FX graph representing the semantics of the passed-in root. Implementation notes: a random input shape is created to generate dummy inputs; we enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to be able to use HFTracer._generate_dummy_input; this is necessary because concrete args are added as inputs to the traced module since https://github.com/pytorch/pytorch/pull/55888; default values for inputs are removed, as the forward pass would fail with them (without this, torch.jit.script fails because an input's type is Optional[torch.Tensor]: it cannot infer the attributes and methods the input should have, and fails); a concrete arg is not used and should be removed; TODO: this solves GraphModule creation, as without the return-type annotation a Tuple return type causes code-execution failure.

_stateless_mod_instanciation_depends_on_proxies reports whether the module was instantiated with proxies; if that is the case, such a module cannot be a leaf module because its attributes are input-dependent. _insert_module_as_submodule is a helper method which tries to insert a module that was not declared as a submodule; if one of the module attributes is a proxy, its instantiation is input-dependent and it is not possible to insert such modules: those should be traced through. There is no need to add multiple instances of the same module. path_of_module is a helper method to find the qualified name of mod in the module hierarchy of root; for example, if root has a submodule named foo, which has a submodule named bar, passing bar into this function will return the string "foo.bar". Args: mod (str): the module to retrieve the qualified name for. keys is called when a proxy object has the keys() method called; this is what happens when ** is called on a proxy, and it should return an iterator if ** is supposed to work in your custom tracer.

symbolic_trace performs symbolic tracing on the model. Args: model (PreTrainedModel): the model to trace. input_names (List[str], optional): the names of the inputs of the traced model; if unset, model.dummy_inputs.keys() are used instead. disable_check (bool, optional, defaults to False): if True, no check is done before trying to trace the model; this is mostly useful for debugging purposes. tracer_cls (Type[HFTracer], optional, defaults to HFTracer): the tracer class to use for instantiating the tracer; if unset, HFTracer is used instead. Returns: torch.fx.GraphModule, a GraphModule constructed by recording operations seen while tracing the model. Example: from transformers.utils.fx import symbolic_trace; traced_model = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"]). Tracing: the model class must be stored as an attribute to allow model deserialization, which uses trace() (and thus _generate_dummy_input), where the model class is needed.
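The "meta" device is what makes the shape-recording approach described above cheap: meta tensors carry shape and dtype but no storage. The sketch below (not part of the library, just an illustration, and assuming a reasonably recent PyTorch where matmul has a meta kernel) shows how shapes propagate without computation and why value-dependent ops such as torch.where need the hand-written overrides defined in the code that follows:

```python
import torch

# Meta tensors carry shape/dtype but no data, so shape inference is essentially free.
hidden = torch.empty(4, 128, 768, device="meta")  # (batch, seq, hidden)
proj = torch.empty(768, 3072, device="meta")      # (hidden, intermediate)

out = torch.matmul(hidden, proj)                  # shape propagates without running the matmul
print(out.shape, out.device)                      # torch.Size([4, 128, 3072]) meta

# Value-dependent ops cannot run on meta tensors because there are no values.
# The torch_where override below therefore returns condition + x + y on the meta
# device, which has the same broadcasted shape torch.where(condition, x, y) would have.
cond = torch.empty(4, 1, dtype=torch.bool, device="meta")
x = torch.empty(4, 128, device="meta")
y = torch.empty(1, 128, device="meta")
shape_proxy = cond.to(device="meta") + x.to(device="meta") + y.to(device="meta")
print(shape_proxy.shape)                          # torch.Size([4, 128]); no values needed
```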
import builtins import collections import functools import inspect import math import operator import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union import torch from torch import nn from torch.fx import Graph, GraphModule, Proxy, Tracer from torch.fx._compatibility import compatibility from torch.fx.proxy import ParameterProxy from .. import PretrainedConfig, PreTrainedModel, logging from ..models.auto import get_values from ..models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from ..utils import ( ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, get_torch_version, is_peft_available, is_torch_fx_available, ) if is_peft_available(): from peft import PeftModel logger = logging.get_logger(__name__) _IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES def _generate_supported_model_class_names( model_name: Type[PretrainedConfig], supported_tasks: Optional[Union[str, List[str]]] = None, ) -> List[str]: task_mapping = { "default": MODEL_MAPPING_NAMES, "pretraining": MODEL_FOR_PRETRAINING_MAPPING_NAMES, "next-sentence-prediction": MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, "masked-lm": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "causal-lm": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "masked-image-modeling": MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, "ctc": MODEL_FOR_CTC_MAPPING_NAMES, "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "semantic-segmentation": MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, "backbone": MODEL_FOR_BACKBONE_MAPPING_NAMES, } if supported_tasks is None: supported_tasks = task_mapping.keys() if isinstance(supported_tasks, str): supported_tasks = [supported_tasks] model_class_names = [] for task in supported_tasks: class_name = task_mapping[task].get(model_name, None) if class_name: model_class_names.append(class_name) return model_class_names _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS = [ "altclip", "albert", "bart", "bert", "blenderbot", "blenderbot-small", "bloom", "clip", "convnext", "deberta", "deberta-v2", "dinov2", "distilbert", "donut-swin", "electra", "gpt2", 
"gpt_neo", "gptj", "hubert", "layoutlm", "lxmert", "m2m_100", "marian", "mbart", "megatron-bert", "mobilebert", "mt5", "nezha", "opt", "pegasus", "plbart", "resnet", "roberta", "segformer", "speech_to_text", "speech_to_text_2", "swin", "t5", "trocr", "vit", "xglm", "wav2vec2", ] _REGULAR_SUPPORTED_MODELS = [] for item in _REGULAR_SUPPORTED_MODEL_NAMES_AND_TASKS: if isinstance(item, dict): _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(**item)) else: _REGULAR_SUPPORTED_MODELS.extend(_generate_supported_model_class_names(item)) _SPECIAL_SUPPORTED_MODELS = [ "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", "AltCLIPTextModel", "AltCLIPVisionModel", "GitVisionModel", "GPT2DoubleHeadsModel", "Speech2Text2Decoder", "TrOCRDecoder", "PeftModelForCausalLM", "PeftModelForSeq2SeqLM", ] _SUPPORTED_MODELS = tuple(sorted(set(_REGULAR_SUPPORTED_MODELS + _SPECIAL_SUPPORTED_MODELS))) def torch_nn_embedding(self, input): return torch.empty(*input.shape, self.weight.shape[-1], device="meta", dtype=self.weight.dtype) def torch_nn_functional_embedding( input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False ): return torch.empty(*input.shape, weight.shape[-1], device="meta", dtype=weight.dtype) def torch_nn_layernorm(self, input): return input def torch_nn_groupnorm(self, input): return input def torch_nn_linear(self, input): return torch.empty(input.shape[:-1] + (self.out_features,), device="meta") def torch_relu(x): return x def torch_nn_relu(self, x): return x def torch_nn_functional_relu(x, inplace=False): if not inplace: raise ValueError("Don't support in-place functional.relu for MetaTensor analysis") return x def torch_where(condition, x, y): return condition.to(device="meta") + x.to(device="meta") + y.to(device="meta") def torch_abs(input, *, out=None): if out is not None: raise ValueError("Don't support in-place abs for MetaTensor analysis") return input def torch_arange(*args, **kwargs): n = len(args) step = 1 if n == 1: start = 0 end = args[0] elif n == 2: start, end = args else: start, end, step = args if isinstance(start, float): start = int(start) if isinstance(end, float): start = int(end) if isinstance(step, float): step = int(step) step = kwargs.get("step", step) dtype = kwargs.get("dtype") return torch.empty((end - start) // step, dtype=dtype, device="meta") def torch_full(*args, **kwargs): args = list(args) if isinstance(args[1], torch.Tensor) and args[1].device == torch.device("meta"): args[1] = 1 kwargs_without_device = dict(kwargs) kwargs_without_device.pop("device", None) return torch.full(*args, **kwargs_without_device) def torch_cat(tensors, dim=None, axis=None, *, out=None): if dim is None and axis is None: dim = 0 if dim is None and axis is not None: dim = axis if dim < 0: dim = tensors[0].dim() + dim shapes = [t.shape for t in tensors] shape = list(shapes[0]) concatenated_dim = sum(shape[dim] for shape in shapes) final_shape = shape[:dim] + [concatenated_dim] + shape[dim + 1 :] return torch.empty(final_shape, device="meta") def torch_stack(tensors, dim=None, axis=None, *, out=None): if dim is None and axis is None: dim = 0 if dim is None and axis is not None: dim = axis if dim < 0: dim = tensors[0].dim() + 1 + dim shape = list(tensors[0].shape) shape.insert(dim, len(tensors)) return torch.empty(shape, device="meta") def torch_add(input, other, *, alpha=1, out=None): if not isinstance(input, torch.Tensor): return torch.empty_like(other, device="meta") if not 
isinstance(other, torch.Tensor): return torch.empty_like(input, device="meta") max_length = max(input.dim(), other.dim()) input_shape = list(input.shape) + [1] * (max_length - input.dim()) other_shape = list(other.shape) + [1] * (max_length - other.dim()) shape = [] for i in range(max_length): shape.append(max(input_shape[i], other_shape[i])) return torch.empty(shape, device="meta") def torch_mul(input, other, *, out=None): return torch_add(input, other, out=out) def torch_tensor_mul(self, other): return torch_mul(self, other) def torch_matmul(input, other, *, out=None): d1 = input.dim() d2 = other.dim() shape = None if d1 == 1 and d2 == 1: shape = None elif d1 == 2 and d2 == 2: shape = (input.size(0), other.size(1)) elif d1 == 1 and d2 == 2: shape = (other.size(1),) elif d1 == 2 and d1 == 1: shape = (input.size(0),) else: max_length = max(input.dim(), other.dim()) shape1 = list(input.shape) shape2 = list(other.shape) if d1 == 1: shape1 = [1] + shape1 if d2 == 1: shape2.append(1) shape1 = [-1] * (max_length - d1) + list(input.shape) shape2 = [-1] * (max_length - d2) + list(other.shape) shape = [] for i in range(max_length): shape.append(max(shape1[i], shape2[i])) shape[-2] = shape1[-2] shape[-1] = shape2[-1] if d1 == 1: shape.pop(-2) if d2 == 1: shape.pop(-1) if shape is None: return torch.tensor(0.0, device="meta") return torch.empty(*shape, device="meta") def torch_bmm(input, mat2, *, out=None): if out is not None: raise ValueError("Don't support in-place bmm for MetaTensor analysis") batch_size, n, m = input.shape _, _, p = mat2.shape return torch.empty(batch_size, n, p, device="meta") def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None): if out is not None: raise ValueError("Don't support in-place baddbmm for MetaTensor analysis") return torch_bmm(batch1, batch2) def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None): return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out) def torch_einsum(equation, *operands): concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands) return torch.einsum(equation, *concrete_operands).to("meta") def torch_tensor_repeat(self, *sizes): shape = list(self.shape) for i, x in enumerate(sizes): shape[i] *= x return torch.empty(shape, device="meta") def torch_repeat_interleave(*args, dim=None, output_size=None): num_args = len(args) if num_args == 1: shape = [output_size if output_size is not None else args[0].sum()] else: shape = list(args[0].shape) if dim is None: if num_args > 2: dim = args[2] else: shape = [sum(shape)] dim = 0 repeats = args[1] if isinstance(repeats, int) or torch.numel(repeats) == 1: shape[dim] *= int(repeats) else: shape[dim] = output_size if output_size is not None else repeats.sum() return torch.empty(*shape, device="meta") def torch_index_select(input, dim, index, *, out=None): shape = list(input.shape) shape[dim] = len(index) return torch.empty(*shape, device="meta") def torch_tensor_index_select(self, dim, index): return torch_index_select(self, dim, index) def torch_gather(input, dim, index, *, sparse_grad=False, out=None): shape = list(input.shape) shape[dim] = index.shape[dim] return torch.empty(*shape, device="meta") def torch_tensor_gather(self, dim, index): return torch_gather(self, dim, index) def torch_roll(input, shifts, dims=None): return input def torch_flip(input, dims): return input def torch_tensor_flip(self, dims): return self def torch_nn_conv1d(self, input): l_in = input.shape[-1] shape = None padding = self.padding if padding == 
"valid": padding = (0, 0) if padding == "same": shape = list(input.shape) if shape is None: shape = list(input.shape) l_out = math.floor( (l_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1 ) shape[-1] = l_out shape[-2] = self.out_channels return torch.empty(shape, device="meta") def torch_nn_conv2d(self, input): h_in, w_in = input.shape[-2:] shape = None padding = self.padding if padding == "valid": padding = (0, 0) if padding == "same": shape = list(input.shape) if shape is None: shape = list(input.shape) h_out = math.floor( (h_in + 2 * padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1 ) w_out = math.floor( (w_in + 2 * padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1 ) shape[-2:] = [h_out, w_out] shape[-3] = self.out_channels return torch.empty(shape, device="meta") def torch_squeeze(input, dim=None): shape = list(input.shape) if dim is not None: if dim < 0: dim = input.dim() + dim if shape[dim] == 1: shape.pop(dim) else: new_shape = [] for dim_value in shape: if dim_value == 1: continue new_shape.append(dim_value) shape = new_shape return torch.empty(shape, device="meta") def torch_tensor_squeeze(self, dim=None): return torch_squeeze(self, dim) def torch_unsqueeze(input, dim): shape = list(input.shape) if dim < 0: dim = input.dim() + 1 + dim shape.insert(dim, 1) return torch.empty(shape, device="meta") def torch_tensor_unsqueeze(self, dim): return torch_unsqueeze(self, dim) def torch_unique_consecutive(input, **kwargs): output = torch.unique_consecutive(torch.zeros_like(input, device="cpu"), **kwargs) if isinstance(output, torch.Tensor): return output.to("meta") else: return tuple(map(output, lambda x: x.to("meta"))) def torch_nn_functional_one_hot(tensor, num_classes=-1): if num_classes < 0: raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis") shape = list(tensor.shape) + [num_classes] return torch.empty(shape, device="meta") def torch_nn_mseloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def torch_nn_crossentropyloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def torch_nn_bcewithlogitsloss(self, input, target): if self.reduction == "none": shape = target.shape else: shape = (1,) return torch.empty(shape, device="meta") def operator_getitem(a, b): def to_concrete(t): if isinstance(t, torch.Tensor): concrete = torch.ones_like(t, device="cpu") if concrete.dtype in [torch.float16, torch.float32, torch.float64, torch.int32]: concrete = concrete.to(torch.int64) return concrete return t if isinstance(a, torch.Tensor): if isinstance(b, tuple): b = tuple(map(to_concrete, b)) else: b = to_concrete(b) return operator.getitem(torch.empty_like(a, device="cpu"), b).to("meta") return operator.getitem(a, b) _MANUAL_META_OVERRIDES: Dict[Callable, Callable] = { torch.nn.Embedding: torch_nn_embedding, torch.nn.functional.embedding: torch_nn_functional_embedding, torch.nn.LayerNorm: torch_nn_layernorm, torch.nn.GroupNorm: torch_nn_groupnorm, torch.nn.Linear: torch_nn_linear, torch.relu: torch_relu, torch.nn.functional.relu: torch_nn_functional_relu, torch.nn.ReLU: torch_nn_relu, torch.where: torch_where, torch.abs: torch_abs, torch.arange: torch_arange, torch.full: torch_full, torch.cat: torch_cat, torch.stack: torch_stack, torch.add: torch_add, torch.mul: torch_mul, 
torch.Tensor.mul: torch_tensor_mul, torch.matmul: torch_matmul, torch.bmm: torch_bmm, torch.baddbmm: torch_baddbmm, torch.Tensor.baddbmm: torch_tensor_baddbmm, torch.einsum: torch_einsum, torch.Tensor.repeat: torch_tensor_repeat, torch.repeat_interleave: torch_repeat_interleave, torch.roll: torch_roll, torch.flip: torch_flip, torch.Tensor.flip: torch_tensor_flip, torch.index_select: torch_index_select, torch.Tensor.index_select: torch_tensor_index_select, torch.gather: torch_gather, torch.Tensor.gather: torch_tensor_gather, torch.nn.Conv1d: torch_nn_conv1d, torch.nn.Conv2d: torch_nn_conv2d, torch.squeeze: torch_squeeze, torch.Tensor.squeeze: torch_tensor_squeeze, torch.unsqueeze: torch_unsqueeze, torch.Tensor.unsqueeze: torch_tensor_unsqueeze, torch.unique_consecutive: torch_unique_consecutive, torch.nn.functional.one_hot: torch_nn_functional_one_hot, torch.nn.MSELoss: torch_nn_mseloss, torch.nn.CrossEntropyLoss: torch_nn_crossentropyloss, torch.nn.BCEWithLogitsLoss: torch_nn_bcewithlogitsloss, operator.getitem: operator_getitem, } class HFProxy(Proxy): def install_metadata(self, metadata): self._metadata = metadata @property def shape(self): return self.tracer.create_proxy("call_method", "size", (self,), {}) @property def device(self): return MetaDeviceAttribute(self, "device") def __len__(self): if hasattr(self, "_metadata") and self._metadata is not None: return len(self._metadata) return super().__len__() def __bool__(self): if hasattr(self, "_metadata") and self._metadata is not None: return self._metadata return super().__bool__() def __getattr__(self, k): if k == "_metadata": return self.__getattribute__(k) return HFAttribute(self, k) def __setitem__(self, indices, values): return self.tracer.create_proxy("call_function", operator.setitem, (self, indices, values), {}) def __contains__(self, key): if hasattr(self, "_metadata") and self._metadata is not None: return key in self._metadata return super().__contains__(key) class HFAttribute(HFProxy): def __init__(self, root, attr: str): self.root = root self.attr = attr self.tracer = root.tracer self._node = None if hasattr(self.root, "_metadata"): self.install_metadata(getattr(self.root._metadata, attr)) @property def node(self): if self._node is None: self._node = self.tracer.create_proxy("call_function", builtins.getattr, (self.root, self.attr), {}).node return self._node def __call__(self, *args, **kwargs): return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs) class MetaDeviceAttribute(HFAttribute): pass def _proxies_to_metas(v): if isinstance(v, MetaDeviceAttribute): return "meta" if isinstance(v, torch.fx.Proxy): if not (isinstance(v, HFProxy) and hasattr(v, "_metadata")): raise RuntimeError(f"No metadata was found for {v}") return v._metadata return v def _gen_constructor_wrapper(target): @functools.wraps(target) def wrapper(*args, **kwargs): proxy = None def check_has_proxy(v): if isinstance(v, Proxy): nonlocal proxy proxy = v torch.fx.node.map_aggregate(args, check_has_proxy) torch.fx.node.map_aggregate(kwargs, check_has_proxy) if proxy is not None: return proxy.tracer.create_proxy("call_function", target, args, kwargs) else: return target(*args, **kwargs) return wrapper, target def _generate_random_int(low: int = 10, high: int = 20, forbidden_values: Optional[List[int]] = None): if forbidden_values is None: forbidden_values = [] value = random.randint(low, high) while value in forbidden_values: value = random.randint(low, high) return value class HFTracer(Tracer): proxy_buffer_attributes: bool = 
True allow_insert_stateless_mods: bool = True _TORCH_METHODS_TO_PATCH = [ "arange", "zeros", "ones", "full", "full_like", "eye", "empty", "tensor", "clamp", "finfo", ] supported_archs = (PreTrainedModel,) if not is_peft_available() else (PreTrainedModel, PeftModel) def __init__(self, autowrap_modules=(math,), autowrap_functions=()): super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions) if not is_torch_fx_available(): raise ImportError( f"Found an incompatible version of torch. Found version {get_torch_version()}, but only version " f"{TORCH_FX_REQUIRED_VERSION} is supported." ) def _generate_dummy_input( self, model: PreTrainedModel, input_name: str, shape: List[int] ) -> Dict[str, torch.Tensor]: model_class_name = getattr(model, "class_for_deserialization", model.__class__).__name__ device = model.device inputs_dict = {} if input_name in ["labels", "start_positions", "end_positions"]: batch_size = shape[0] if model_class_name in [ *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), "XLNetForQuestionAnswering", ]: inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) inputs_dict["end_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): if not hasattr(model.config, "problem_type") or model.config.problem_type is None: raise ValueError( "Could not retrieve the problem type for the sequence classification task, please set " 'model.config.problem_type to one of the following values: "regression", ' '"single_label_classification", or "multi_label_classification".' ) if model.config.problem_type == "regression": labels_shape = (batch_size, model.config.num_labels) labels_dtype = torch.float32 elif model.config.problem_type == "single_label_classification": labels_shape = (batch_size,) labels_dtype = torch.long elif model.config.problem_type == "multi_label_classification": labels_shape = (batch_size, model.config.num_labels) labels_dtype = torch.float32 else: raise ValueError( 'Expected model.config.problem_type to be either: "regression", "single_label_classification"' f', or "multi_label_classification", but "{model.config.problem_type}" was provided.' 
) inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) elif model_class_name in [ *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), "GPT2DoubleHeadsModel", "PeftModelForCausalLM", "PeftModelForSeq2SeqLM", ]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) elif model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.float32, device=device) else: raise NotImplementedError( f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." ) elif "pixel_values" in input_name: batch_size = shape[0] image_size = getattr(model.config, "image_size", None) if image_size is None: if hasattr(model.config, "vision_config"): image_size = model.config.vision_config.image_size elif hasattr(model.config, "encoder"): image_size = model.config.encoder.image_size else: image_size = (_generate_random_int(), _generate_random_int()) num_channels = getattr(model.config, "num_channels", 3) if not isinstance(image_size, collections.abc.Iterable): image_size = (image_size, image_size) height, width = image_size inputs_dict[input_name] = torch.zeros( batch_size, num_channels, height, width, dtype=torch.float32, device=device ) elif "bbox" in input_name: inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device) elif "input_features" in input_name: inputs_dict[input_name] = torch.zeros( *shape, model.config.input_feat_per_channel, dtype=torch.float, device=device ) elif "visual_feats" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_feat_dim, ], dtype=torch.float, device=device, ) elif "visual_pos" in input_name: inputs_dict[input_name] = torch.zeros( shape + [ model.config.visual_pos_dim, ], dtype=torch.float, device=device, ) elif "inputs" in input_name: inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device) elif "input_values" in input_name: batch_size, _ = shape seq_length = _generate_random_int(low=10000, high=20000) inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device) elif "mask" in input_name or "ids" in input_name: inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device) else: shape_with_hidden_size = shape + [model.config.hidden_size] inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device) return inputs_dict def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None): rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) if kind == "placeholder" and target in self.meta_args: rv.install_metadata(self.meta_args[target]) return rv if target in self.orig_fns: if "device" in kwargs: kwargs["device"] = "meta" try: args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas) kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas) if kind == "call_function": meta_target = _MANUAL_META_OVERRIDES.get(target, target) meta_out = meta_target(*args_metas, **kwargs_metas) if isinstance(meta_out, torch.Tensor): meta_out = meta_out.to(device="meta") elif kind == "call_method": method = getattr(args_metas[0].__class__, target) 
meta_target = _MANUAL_META_OVERRIDES.get(method, method) meta_out = meta_target(*args_metas, **kwargs_metas) elif kind == "call_module": if not hasattr(self, "orig_forward"): raise AttributeError(f"{self} does not have an attribute called orig_forward") self._disable_module_getattr = True try: mod = self.root.get_submodule(target) mod_type = type(mod) if mod_type in _MANUAL_META_OVERRIDES: meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas) else: meta_out = self.orig_forward(*args_metas, **kwargs_metas) finally: self._disable_module_getattr = False elif kind == "get_attr": self._disable_module_getattr = True try: attr_itr = self.root atoms = target.split(".") for atom in atoms: attr_itr = getattr(attr_itr, atom) if isinstance(attr_itr, torch.Tensor): meta_out = attr_itr.to(device="meta") else: meta_out = attr_itr finally: self._disable_module_getattr = False else: return rv if not isinstance(rv, Proxy): raise ValueError("Don't support composite output yet") rv.install_metadata(meta_out) except Exception as e: if _IS_IN_DEBUG_MODE: warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") return rv def _module_getattr(self, attr, attr_val, parameter_proxy_cache): if getattr(self, "_disable_module_getattr", False): return attr_val else: def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): for n, p in collection_to_search: if attr_val is p: if n not in parameter_proxy_cache: kwargs = {} if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: kwargs["proxy_factory_fn"] = ( None if not self.param_shapes_constant else lambda node: ParameterProxy(self, node, n, attr_val) ) val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) parameter_proxy_cache[n] = val_proxy return parameter_proxy_cache[n] return None if isinstance(attr_val, torch.nn.Parameter): maybe_parameter_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_parameters(), parameter_proxy_cache ) if maybe_parameter_proxy is not None: return maybe_parameter_proxy if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): maybe_buffer_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_buffers(), parameter_proxy_cache ) if maybe_buffer_proxy is not None: return maybe_buffer_proxy return attr_val def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): return self._module_getattr(attr, attr_val, parameter_proxy_cache) def call_module(self, m, forward, args, kwargs): self.orig_forward = forward return super().call_module(m, forward, args, kwargs) def proxy(self, node): return HFProxy(node, self) def trace( self, root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[Dict[str, Any]] = None, dummy_inputs: Optional[Dict[str, Any]] = None, complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True, ) -> Graph: sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root) if concrete_args is None: concrete_args = {} if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs: for param in sig.parameters.values(): if param.name in dummy_inputs: continue if param.default is inspect.Parameter.empty: raise ValueError(f"You need to specify a default value for the parameter {param.name}.") concrete_args.update( { p.name: p.default for p in sig.parameters.values() if (p.name not in dummy_inputs and p.name not in concrete_args) } ) input_names = sig.parameters.keys() - concrete_args.keys() batch_size = 
_generate_random_int() sequence_length = _generate_random_int() shape = [batch_size, sequence_length] if root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): num_choices = _generate_random_int(low=2, high=5) shape.insert(1, num_choices) inputs = dict(dummy_inputs) if dummy_inputs is not None else {} for input_name in input_names: if input_name in inputs: continue if isinstance(root, self.supported_archs) or type(root).__qualname__.startswith( ("_deserialize_graph_module", "_CodeOnlyModule") ): inputs.update(self._generate_dummy_input(root, input_name, shape)) else: raise RuntimeError( f"Could not generate input named {input_name} for because root is not a" " transformers.PreTrainedModel." ) concrete_metas = { input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_ for input_name, input_ in inputs.items() } for param in sig.parameters.values(): if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names: concrete_metas[f"**{param.name}"] = {} self.meta_args = concrete_metas self.patched_torch_methods = { target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH } self.orig_fns = set() for name, (wrapper, orig) in self.patched_torch_methods.items(): setattr(torch, name, wrapper) self.orig_fns.add(orig) try: self.graph = super().trace(root, concrete_args=concrete_args) finally: for name, (_, orig) in self.patched_torch_methods.items(): setattr(torch, name, orig) for node in self.graph.nodes: if node.op == "placeholder": if node.target in input_names: node.args = () node.type = torch.Tensor else: to_visit = [node] to_delete = collections.OrderedDict() while to_visit: n = to_visit.pop(0) to_delete[n] = None to_visit += list(n.users.keys()) for user in reversed(to_delete.keys()): self.graph.erase_node(user) if node.op == "output": node.type = None return self.graph def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool: return any(isinstance(attr, Proxy) for attr in mod.__dict__.values()) def _insert_module_as_submodule(self, mod: nn.Module) -> str: if self._stateless_mod_instanciation_depends_on_proxies(mod): return "" idx = 0 mod_name = mod.__class__.__name__.lower() path = f"{mod_name}_{idx}" already_inserted = False while hasattr(self.root, path): if getattr(self.root, path) is mod: already_inserted = True break path = f"{mod_name}_{idx}" idx += 1 if not already_inserted: self.root.add_module(path, mod) return path def path_of_module(self, mod: nn.Module) -> str: try: return super().path_of_module(mod) except NameError as e: if self.allow_insert_stateless_mods and len(list(mod.parameters())) == 0 and len(list(mod.buffers())) == 0: path = self._insert_module_as_submodule(mod) return path raise e def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool: return (not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module( m, module_qualified_name ) @compatibility(is_backward_compatible=True) def keys(self, obj: "Proxy") -> Any: attribute = HFAttribute(obj, "keys")() if obj.node.target == "**kwargs": return attribute._metadata return attribute def get_concrete_args(model: nn.Module, input_names: List[str]): sig = inspect.signature(model.forward) if not (set(input_names) <= set(sig.parameters.keys())): formatted_input_names = input_names[0] if len(input_names) == 1 else ", ".join(input_names) formatted_allowed_input_names = ", ".join(sig.parameters.keys()) raise ValueError( f"The model does not have input(s) 
named: {formatted_input_names}, expected a subset of the following:" f" {formatted_allowed_input_names}" ) return {p.name: p.default for p in sig.parameters.values() if p.name not in input_names} def check_if_model_is_supported(model: PreTrainedModel): if model.__class__.__name__ not in _SUPPORTED_MODELS: supported_model_names = ", ".join(_SUPPORTED_MODELS) raise NotImplementedError( f"Model {model.__class__.__name__} is not supported yet, supported models: {supported_model_names}" ) def symbolic_trace( model: PreTrainedModel, input_names: Optional[List[str]] = None, disable_check: bool = False, tracer_cls: Type[HFTracer] = HFTracer, ) -> GraphModule: if input_names is None: input_names = model.dummy_inputs.keys() input_names = list(input_names) concrete_args = get_concrete_args(model, input_names) if not disable_check: check_if_model_is_supported(model) tracer = tracer_cls() traced_graph = tracer.trace(model, concrete_args=concrete_args) traced = torch.fx.GraphModule(model, traced_graph) traced.config = model.config traced.class_for_deserialization = model.__class__ traced.device = model.device return traced
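As a usage sketch (assuming the bert-base-uncased checkpoint can be downloaded; any architecture listed in _SUPPORTED_MODELS works the same way, and the exact output container of the traced module may vary by version), the returned GraphModule can be called like the original model and its recorded graph inspected:

```python
import torch
from transformers import AutoTokenizer, BertForSequenceClassification
from transformers.utils.fx import symbolic_trace

model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Trace with the three standard BERT inputs; everything else becomes a concrete arg.
traced = symbolic_trace(model, input_names=["input_ids", "attention_mask", "token_type_ids"])

inputs = tokenizer("Symbolic tracing keeps the graph static.", return_tensors="pt")
with torch.no_grad():
    outputs = traced(**inputs)

print(traced.graph)             # human-readable list of the recorded FX nodes
print(outputs["logits"].shape)  # works whether the traced module returns a dict or a ModelOutput
```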
Copyright 2022 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Generic utilities. cached_property is a descriptor that mimics @property but caches output in a member variable; it comes from tensorflow_datasets and is built into functools from Python 3.8 (see docs.python.org/3/howto/descriptor.html#properties). strtobool, vendored from distutils.util, converts a string representation of truth to True (1) or False (0): true values are "y", "yes", "t", "true", "on" and "1"; false values are "n", "no", "f", "false", "off" and "0"; it raises a ValueError if val is anything else. infer_framework_from_repr tries to guess the framework of an object x from its repr (brittle, but it helps is_tensor try the frameworks in a smart order without needing to import them). _get_frameworks_and_test_func returns an ordered (since we are in Python 3.7+) dictionary of framework to test function, which places the framework we can guess from the repr first, then NumPy, then the others. is_tensor tests whether x is a torch.Tensor, tf.Tensor, jaxlib.xla_extension.DeviceArray or np.ndarray, in the order defined by infer_framework_from_repr (this gives a smart order to test the frameworks with the corresponding tests; tracers are handled as well). is_numpy_array tests whether x is a NumPy array. is_torch_tensor, is_torch_device and is_torch_dtype test whether x is a torch.Tensor, torch.device or torch.dtype respectively, and are safe to call even if torch is not installed. is_tf_tensor tests whether x is a TensorFlow tensor, safe to call even if TensorFlow is not installed; the is_symbolic_tensor predicate is only available starting with TF 2.14. is_tf_symbolic_tensor tests whether x is a TensorFlow symbolic tensor (i.e. not eager), safe to call even if TensorFlow is not installed. is_jax_tensor tests whether x is a Jax tensor, safe to call even if Jax is not installed. to_py_obj converts a TensorFlow tensor, PyTorch tensor, NumPy array or Python list to a Python list (tolist also works on 0-d NumPy arrays). to_numpy converts a TensorFlow tensor, PyTorch tensor, NumPy array or Python list to a NumPy array.

ModelOutput is the base class for all model outputs as dataclass. It has a __getitem__ that allows indexing by integer or slice (like a tuple) or by strings (like a dictionary) that will ignore the None attributes; otherwise it behaves like a regular Python dictionary. <Tip warning={true}> You can't unpack a ModelOutput directly. Use the utils.ModelOutput.to_tuple method to convert it to a tuple before. </Tip> Subclasses are registered as pytree nodes; this is necessary to synchronize gradients when using torch.nn.parallel.DistributedDataParallel with static_graph=True with modules that output ModelOutput subclasses. Subclasses of ModelOutput must use the @dataclass decorator; this check is done in __init__ because the @dataclass decorator operates after __init_subclass__ (issubclass would return True for issubclass(ModelOutput, ModelOutput) when False is needed, so we just need to check that the current class is not ModelOutput itself; the check only occurs if the @dataclass decorator has been used). Safety and consistency checks: if the first field is provided as an iterator and the iterator is a key/value iterator, set the associated fields; if it is not an iterator of key/values, set it as an attribute; if it is a mixed iterator, raise an error. __setitem__ and __setattr__ do not call each other, to avoid recursion errors (a KeyError is raised if needed). to_tuple converts self to a tuple containing all the attributes/keys that are not None.

ExplicitEnum is an Enum with a more explicit error message for missing values. PaddingStrategy holds the possible values for the padding argument in PreTrainedTokenizerBase.__call__, useful for tab-completion in an IDE. TensorType holds the possible values for the return_tensors argument in PreTrainedTokenizerBase.__call__, useful for tab-completion in an IDE. ContextManagers is a wrapper for contextlib.ExitStack which enters a collection of context managers; it is an adaptation of ContextManagers in the fastcore library. can_return_loss checks whether a given model can return loss. Args: model_class (type): the class of the model. find_labels finds the labels used by a given model. Args: model_class (type): the class of the model. flatten_dict flattens a nested dict into a single-level dict; the remaining helpers (working_or_temp_dir, transpose, reshape, squeeze, expand_dims, tensor_size, add_model_info_to_auto_map and infer_framework) follow:

def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
issubclass modeloutput modeloutput when false is needed just need to check that the current class is not modeloutput check the modeloutput dataclass only occurs if dataclass decorator has been used safety and consistency checks if we provided an iterator as first field and the iterator is a key value iterator set the associated fields if we do not have an iterator of key values set it as attribute if we have a mixed iterator raise an error don t call self __setitem__ to avoid recursion errors will raise a keyexception if needed don t call self __setattr__ to avoid recursion errors convert self to a tuple containing all the attributes keys that are not none enum with more explicit error message for missing values possible values for the padding argument in pretrainedtokenizerbase __call__ useful for tab completion in an ide possible values for the return_tensors argument in pretrainedtokenizerbase __call__ useful for tab completion in an ide wrapper for contextlib exitstack which enters a collection of context managers adaptation of contextmanagers in the fastcore library check if a given model can return loss args model_class type the class of the model tensorflow models pytorch models flax models find the labels used by a given model args model_class type the class of the model tensorflow models pytorch models flax models flatten a nested dict into a single level dict framework agnostic version of numpy transpose that will work on torch tensorflow jax tensors as well as numpy arrays framework agnostic version of numpy reshape that will work on torch tensorflow jax tensors as well as numpy arrays framework agnostic version of numpy squeeze that will work on torch tensorflow jax tensors as well as numpy arrays framework agnostic version of numpy expand_dims that will work on torch tensorflow jax tensors as well as numpy arrays framework agnostic version of numpy size that will work on torch tensorflow jax tensors as well as numpy arrays adds the information of the repo_id to a given auto map infers the framework of a given model without using isinstance because we cannot guarantee that the relevant classes are imported or available
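A minimal sketch of the ModelOutput behaviour described above (the implementation follows below). It assumes torch and transformers are installed; SampleOutput and its field names are hypothetical, used only for illustration.

from dataclasses import dataclass
from typing import Optional

import torch
from transformers.utils import ModelOutput


@dataclass
class SampleOutput(ModelOutput):
    # Subclasses must use the @dataclass decorator; every field after the first needs a None default.
    logits: Optional[torch.Tensor] = None
    hidden_states: Optional[torch.Tensor] = None


out = SampleOutput(logits=torch.ones(2, 3))
out["logits"]        # string key, like a dictionary
out.logits           # attribute access returns the same tensor
out[0]               # integer index goes through to_tuple(); None attributes are skipped
out.to_tuple()       # (tensor,) -- hidden_states is None, so it is dropped
# out.pop("logits")  # would raise: in-place mutation (pop/update/setdefault/del) is disabled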
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields, is_dataclass from enum import Enum from typing import Any, ContextManager, Iterable, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class cached_property(property): def __get__(self, obj, objtype=None): if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") attr = "__cached_" + self.fget.__name__ cached = getattr(obj, attr, None) if cached is None: cached = self.fget(obj) setattr(obj, attr, cached) return cached def strtobool(val): val = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(f"invalid truth value {val!r}") def infer_framework_from_repr(x): representation = str(type(x)) if representation.startswith("<class 'torch."): return "pt" elif representation.startswith("<class 'tensorflow."): return "tf" elif representation.startswith("<class 'jax"): return "jax" elif representation.startswith("<class 'numpy."): return "np" def _get_frameworks_and_test_func(x): framework_to_test = { "pt": is_torch_tensor, "tf": is_tf_tensor, "jax": is_jax_tensor, "np": is_numpy_array, } preferred_framework = infer_framework_from_repr(x) frameworks = [] if preferred_framework is None else [preferred_framework] if preferred_framework != "np": frameworks.append("np") frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, "np"]]) return {f: framework_to_test[f] for f in frameworks} def is_tensor(x): framework_to_test_func = _get_frameworks_and_test_func(x) for test_func in framework_to_test_func.values(): if test_func(x): return True if is_torch_fx_proxy(x): return True if is_flax_available(): from jax.core import Tracer if isinstance(x, Tracer): return True return False def _is_numpy(x): return isinstance(x, np.ndarray) def is_numpy_array(x): return _is_numpy(x) def _is_torch(x): import torch return isinstance(x, torch.Tensor) def is_torch_tensor(x): return False if not is_torch_available() else _is_torch(x) def _is_torch_device(x): import torch return isinstance(x, torch.device) def is_torch_device(x): return False if not is_torch_available() else _is_torch_device(x) def _is_torch_dtype(x): import torch if isinstance(x, str): if hasattr(torch, x): x = getattr(torch, x) else: return False return isinstance(x, torch.dtype) def is_torch_dtype(x): return False if not is_torch_available() else _is_torch_dtype(x) def _is_tensorflow(x): import tensorflow as tf return isinstance(x, tf.Tensor) def is_tf_tensor(x): return False if not is_tf_available() else _is_tensorflow(x) def _is_tf_symbolic_tensor(x): import tensorflow as tf if hasattr(tf, "is_symbolic_tensor"): return tf.is_symbolic_tensor(x) return type(x) == tf.Tensor def is_tf_symbolic_tensor(x): return False if not is_tf_available() else _is_tf_symbolic_tensor(x) def _is_jax(x): import jax.numpy as jnp return isinstance(x, jnp.ndarray) def is_jax_tensor(x): return False if not is_flax_available() else _is_jax(x) def to_py_obj(obj): framework_to_py_obj = { "pt": lambda obj: obj.detach().cpu().tolist(), "tf": lambda obj: obj.numpy().tolist(), "jax": lambda obj: np.asarray(obj).tolist(), "np": lambda obj: obj.tolist(), } if isinstance(obj, (dict, UserDict)): return {k: 
to_py_obj(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return [to_py_obj(o) for o in obj] framework_to_test_func = _get_frameworks_and_test_func(obj) for framework, test_func in framework_to_test_func.items(): if test_func(obj): return framework_to_py_obj[framework](obj) if isinstance(obj, np.number): return obj.tolist() else: return obj def to_numpy(obj): framework_to_numpy = { "pt": lambda obj: obj.detach().cpu().numpy(), "tf": lambda obj: obj.numpy(), "jax": lambda obj: np.asarray(obj), "np": lambda obj: obj, } if isinstance(obj, (dict, UserDict)): return {k: to_numpy(v) for k, v in obj.items()} elif isinstance(obj, (list, tuple)): return np.array(obj) framework_to_test_func = _get_frameworks_and_test_func(obj) for framework, test_func in framework_to_test_func.items(): if test_func(obj): return framework_to_numpy[framework](obj) return obj class ModelOutput(OrderedDict): def __init_subclass__(cls) -> None: if is_torch_available(): _torch_pytree._register_pytree_node( cls, _model_output_flatten, _model_output_unflatten, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) is_modeloutput_subclass = self.__class__ != ModelOutput if is_modeloutput_subclass and not is_dataclass(self): raise TypeError( f"{self.__module__}.{self.__class__.__name__} is not a dataclasss." " This is a subclass of ModelOutput and so must use the @dataclass decorator." ) def __post_init__(self): class_fields = fields(self) if not len(class_fields): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(first_field): if isinstance(first_field, dict): iterator = first_field.items() first_field_iterator = True else: try: iterator = iter(first_field) first_field_iterator = True except TypeError: first_field_iterator = False if first_field_iterator: for idx, element in enumerate(iterator): if ( not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str) ): if idx == 0: self[class_fields[0].name] = first_field else: raise ValueError( f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." 
) break setattr(self, element[0], element[1]) if element[1] is not None: self[element[0]] = element[1] elif first_field is not None: self[class_fields[0].name] = first_field else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k): if isinstance(k, str): inner_dict = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__(self, name, value): if name in self.keys() and value is not None: super().__setitem__(name, value) super().__setattr__(name, value) def __setitem__(self, key, value): super().__setitem__(key, value) super().__setattr__(key, value) def __reduce__(self): if not is_dataclass(self): return super().__reduce__() callable, _args, *remaining = super().__reduce__() args = tuple(getattr(self, field.name) for field in fields(self)) return callable, args, *remaining def to_tuple(self) -> Tuple[Any]: return tuple(self[k] for k in self.keys()) if is_torch_available(): import torch.utils._pytree as _torch_pytree def _model_output_flatten(output: ModelOutput) -> Tuple[List[Any], "_torch_pytree.Context"]: return list(output.values()), (type(output), list(output.keys())) def _model_output_unflatten(values: Iterable[Any], context: "_torch_pytree.Context") -> ModelOutput: output_type, keys = context return output_type(**dict(zip(keys, values))) _torch_pytree._register_pytree_node( ModelOutput, _model_output_flatten, _model_output_unflatten, ) class ExplicitEnum(str, Enum): @classmethod def _missing_(cls, value): raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}" ) class PaddingStrategy(ExplicitEnum): LONGEST = "longest" MAX_LENGTH = "max_length" DO_NOT_PAD = "do_not_pad" class TensorType(ExplicitEnum): PYTORCH = "pt" TENSORFLOW = "tf" NUMPY = "np" JAX = "jax" class ContextManagers: def __init__(self, context_managers: List[ContextManager]): self.context_managers = context_managers self.stack = ExitStack() def __enter__(self): for context_manager in self.context_managers: self.stack.enter_context(context_manager) def __exit__(self, *args, **kwargs): self.stack.__exit__(*args, **kwargs) def can_return_loss(model_class): framework = infer_framework(model_class) if framework == "tf": signature = inspect.signature(model_class.call) elif framework == "pt": signature = inspect.signature(model_class.forward) else: signature = inspect.signature(model_class.__call__) for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def find_labels(model_class): model_name = model_class.__name__ framework = infer_framework(model_class) if framework == "tf": signature = inspect.signature(model_class.call) elif framework == "pt": signature = inspect.signature(model_class.forward) else: signature = inspect.signature(model_class.__call__) if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p 
for p in signature.parameters if "label" in p] def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."): def _flatten_dict(d, parent_key="", delimiter="."): for k, v in d.items(): key = str(parent_key) + delimiter + str(k) if parent_key else k if v and isinstance(v, MutableMapping): yield from flatten_dict(v, key, delimiter=delimiter).items() else: yield key, v return dict(_flatten_dict(d, parent_key, delimiter)) @contextmanager def working_or_temp_dir(working_dir, use_temp_dir: bool = False): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def transpose(array, axes=None): if is_numpy_array(array): return np.transpose(array, axes=axes) elif is_torch_tensor(array): return array.T if axes is None else array.permute(*axes) elif is_tf_tensor(array): import tensorflow as tf return tf.transpose(array, perm=axes) elif is_jax_tensor(array): return jnp.transpose(array, axes=axes) else: raise ValueError(f"Type not supported for transpose: {type(array)}.") def reshape(array, newshape): if is_numpy_array(array): return np.reshape(array, newshape) elif is_torch_tensor(array): return array.reshape(*newshape) elif is_tf_tensor(array): import tensorflow as tf return tf.reshape(array, newshape) elif is_jax_tensor(array): return jnp.reshape(array, newshape) else: raise ValueError(f"Type not supported for reshape: {type(array)}.") def squeeze(array, axis=None): if is_numpy_array(array): return np.squeeze(array, axis=axis) elif is_torch_tensor(array): return array.squeeze() if axis is None else array.squeeze(dim=axis) elif is_tf_tensor(array): import tensorflow as tf return tf.squeeze(array, axis=axis) elif is_jax_tensor(array): return jnp.squeeze(array, axis=axis) else: raise ValueError(f"Type not supported for squeeze: {type(array)}.") def expand_dims(array, axis): if is_numpy_array(array): return np.expand_dims(array, axis) elif is_torch_tensor(array): return array.unsqueeze(dim=axis) elif is_tf_tensor(array): import tensorflow as tf return tf.expand_dims(array, axis=axis) elif is_jax_tensor(array): return jnp.expand_dims(array, axis=axis) else: raise ValueError(f"Type not supported for expand_dims: {type(array)}.") def tensor_size(array): if is_numpy_array(array): return np.size(array) elif is_torch_tensor(array): return array.numel() elif is_tf_tensor(array): import tensorflow as tf return tf.size(array) elif is_jax_tensor(array): return array.size else: raise ValueError(f"Type not supported for expand_dims: {type(array)}.") def add_model_info_to_auto_map(auto_map, repo_id): for key, value in auto_map.items(): if isinstance(value, (tuple, list)): auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: auto_map[key] = f"{repo_id}--{value}" return auto_map def infer_framework(model_class): for base_class in inspect.getmro(model_class): module = base_class.__module__ name = base_class.__name__ if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch") or name == "PreTrainedModel": return "pt" elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(f"Could not infer framework from class {model_class}.")
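A short usage sketch of the framework-agnostic helpers defined above, assuming NumPy and PyTorch are installed; the expected values in the comments follow from the shapes used.

import numpy as np
import torch
from transformers.utils.generic import expand_dims, flatten_dict, reshape, to_py_obj, transpose

x_np = np.zeros((2, 3))
x_pt = torch.zeros(2, 3)

transpose(x_np).shape          # (3, 2) -- dispatches to np.transpose
transpose(x_pt).shape          # torch.Size([3, 2]) -- dispatches to Tensor.T
reshape(x_pt, (3, 2)).shape    # torch.Size([3, 2])
expand_dims(x_np, 0).shape     # (1, 2, 3)

to_py_obj(torch.tensor([1, 2]))         # [1, 2] -- detached, moved to CPU, converted to a plain list
flatten_dict({"a": {"b": 1, "c": 2}})   # {"a.b": 1, "a.c": 2}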
codingutf8 2020 optuna hugging face licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license logging utilities import functools import logging import os import sys import threading from logging import critical noqa debug noqa error noqa fatal noqa info noqa notset noqa warn noqa warning noqa from logging import capturewarnings as capturewarnings from typing import optional import huggingfacehub utils as hfhubutils from tqdm import auto as tqdmlib lock threading lock defaulthandler optionallogging handler none loglevels detail logging debug will also print filename and line number debug logging debug info logging info warning logging warning error logging error critical logging critical defaultloglevel logging warning tqdmactive true def getdefaultlogginglevel envlevelstr os getenvtransformersverbosity none if envlevelstr if envlevelstr in loglevels return loglevelsenvlevelstr else logging getlogger warning funknown option transformersverbosityenvlevelstr fhas to be one of joinloglevels keys return defaultloglevel def getlibraryname str return name split 0 def getlibraryrootlogger logging logger return logging getloggergetlibraryname def configurelibraryrootlogger none global defaulthandler with lock if defaulthandler this library has already configured the library root logger return defaulthandler logging streamhandler set sys stderr as stream set defaults based on https github compyinstallerpyinstallerissues7334issuecomment1357447176 if sys stderr is none sys stderr openos devnull w defaulthandler flush sys stderr flush apply our default configuration to the library root logger libraryrootlogger getlibraryrootlogger libraryrootlogger addhandlerdefaulthandler libraryrootlogger setlevelgetdefaultlogginglevel if logging level is debug we add pathname and lineno to formatter for easy debugging if os getenvtransformersverbosity none detail formatter logging formatterlevelnamespathnames linenos asctimes messages defaulthandler setformatterformatter libraryrootlogger propagate false def resetlibraryrootlogger none global defaulthandler with lock if not defaulthandler return libraryrootlogger getlibraryrootlogger libraryrootlogger removehandlerdefaulthandler libraryrootlogger setlevellogging notset defaulthandler none def getloglevelsdict return loglevels def capturewarningscapture logger getloggerpy warnings if not logger handlers logger addhandlerdefaulthandler logger setlevelgetlibraryrootlogger level capturewarningscapture def getloggername optionalstr none logging logger if name is none name getlibraryname configurelibraryrootlogger return logging getloggername def getverbosity int set the verbosity level for the transformers s root logger args verbosity int logging level e g one of transformers logging critical or transformers logging fatal transformers logging error transformers logging warning or transformers logging warn transformers logging info transformers logging debug set the verbosity to the info level return setverbosityinfo def setverbositywarning set the verbosity to the debug level return setverbositydebug def setverbosityerror disable the 
default handler of the huggingface transformers s root logger configurelibraryrootlogger assert defaulthandler is not none getlibraryrootlogger removehandlerdefaulthandler def enabledefaulthandler none adds a handler to the huggingface transformers s root logger configurelibraryrootlogger assert handler is not none getlibraryrootlogger addhandlerhandler def removehandlerhandler logging handler none disable propagation of the library log outputs note that log propagation is disabled by default enable propagation of the library log outputs please disable the huggingface transformers s default handler to prevent double logging if the root logger has been configured enable explicit formatting for every huggingface transformers s logger the explicit formatter is as follows levelnamefilenameline number time message all handlers currently bound to the root logger are affected by this method resets the formatting for huggingface transformers s loggers all handlers currently bound to the root logger are affected by this method this method is identical to logger warning but if env var transformersnoadvisorywarnings1 is set this warning will not be printed this method is identical to logger warning but will emit the warning with the same message only once note the cache is for the function arguments so 2 different callers using the same arguments will hit the cache the assumption here is that all warning messages are unique across the code if they aren t then need to switch to another type of cache that includes the caller frame information in the hashing function dummy tqdm which doesn t do anything def initself args kwargs pylint disableunusedargument self iterator args0 if args else none def iterself return iterself iterator def getattrself return a boolean indicating whether tqdm progress bars are enabled global tqdmactive return booltqdmactive def enableprogressbar disable tqdm progress bar global tqdmactive tqdmactive false hfhubutils disableprogressbars coding utf 8 2020 optuna hugging face licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license logging utilities noqa noqa noqa noqa noqa noqa noqa noqa will also print filename and line number if transformers_verbosity env var is set to one of the valid choices return that as the new default level if it is not fall back to _default_log_level this library has already configured the library root logger set sys stderr as stream set defaults based on https github com pyinstaller pyinstaller issues 7334 issuecomment 1357447176 apply our default configuration to the library root logger if logging level is debug we add pathname and lineno to formatter for easy debugging calls the capturewarnings method from the logging library to enable management of the warnings emitted by the warnings library read more about this method here https docs python org 3 library logging html integration with the warnings module all warnings will be logged through the py warnings logger careful this method also adds a handler to this logger if it does not already have one and updates the logging level of that logger to the library s 
root logger return a logger with the specified name this function is not supposed to be directly accessed unless you are writing a custom transformers module return the current level for the transformers s root logger as an int returns int the logging level tip transformers has following logging levels 50 transformers logging critical or transformers logging fatal 40 transformers logging error 30 transformers logging warning or transformers logging warn 20 transformers logging info 10 transformers logging debug tip set the verbosity level for the transformers s root logger args verbosity int logging level e g one of transformers logging critical or transformers logging fatal transformers logging error transformers logging warning or transformers logging warn transformers logging info transformers logging debug set the verbosity to the info level set the verbosity to the warning level set the verbosity to the debug level set the verbosity to the error level disable the default handler of the huggingface transformers s root logger enable the default handler of the huggingface transformers s root logger adds a handler to the huggingface transformers s root logger removes given handler from the huggingface transformers s root logger disable propagation of the library log outputs note that log propagation is disabled by default enable propagation of the library log outputs please disable the huggingface transformers s default handler to prevent double logging if the root logger has been configured enable explicit formatting for every huggingface transformers s logger the explicit formatter is as follows levelname filename line number time message all handlers currently bound to the root logger are affected by this method resets the formatting for huggingface transformers s loggers all handlers currently bound to the root logger are affected by this method this method is identical to logger warning but if env var transformers_no_advisory_warnings 1 is set this warning will not be printed this method is identical to logger warning but will emit the warning with the same message only once note the cache is for the function arguments so 2 different callers using the same arguments will hit the cache the assumption here is that all warning messages are unique across the code if they aren t then need to switch to another type of cache that includes the caller frame information in the hashing function dummy tqdm which doesn t do anything pylint disable unused argument return empty function pylint disable unused argument return a boolean indicating whether tqdm progress bars are enabled enable tqdm progress bar disable tqdm progress bar
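A usage sketch of the verbosity controls described above (the implementation follows below), assuming transformers is installed.

from transformers.utils import logging

# TRANSFORMERS_VERBOSITY=(detail|debug|info|warning|error|critical) can set the default
# level through the environment; programmatically the equivalent calls are:
logging.set_verbosity_info()                 # same as logging.set_verbosity(logging.INFO)
logger = logging.get_logger("transformers")  # library root logger; module-specific names also work
logger.info("now visible at the INFO level")
logger.warning_advice("skipped entirely when TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set")
logger.warning_once("emitted only once per unique message and arguments")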
import functools import logging import os import sys import threading from logging import ( CRITICAL, DEBUG, ERROR, FATAL, INFO, NOTSET, WARN, WARNING, ) from logging import captureWarnings as _captureWarnings from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "detail": logging.DEBUG, "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING _tqdm_active = True def _get_default_logging_level(): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: return _default_handler = logging.StreamHandler() if sys.stderr is None: sys.stderr = open(os.devnull, "w") _default_handler.flush = sys.stderr.flush library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) if os.getenv("TRANSFORMERS_VERBOSITY", None) == "detail": formatter = logging.Formatter("[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s") _default_handler.setFormatter(formatter) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_log_levels_dict(): return log_levels def captureWarnings(capture): logger = get_logger("py.warnings") if not logger.handlers: logger.addHandler(_default_handler) logger.setLevel(_get_library_root_logger().level) _captureWarnings(capture) def get_logger(name: Optional[str] = None) -> logging.Logger: if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): return set_verbosity(INFO) def set_verbosity_warning(): return set_verbosity(WARNING) def set_verbosity_debug(): return set_verbosity(DEBUG) def set_verbosity_error(): return set_verbosity(ERROR) def disable_default_handler() -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: _configure_library_root_logger() assert handler is not None and handler not in 
_get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None) def warning_advice(self, *args, **kwargs): no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False) if no_advisory_warnings: return self.warning(*args, **kwargs) logging.Logger.warning_advice = warning_advice @functools.lru_cache(None) def warning_once(self, *args, **kwargs): self.warning(*args, **kwargs) logging.Logger.warning_once = warning_once class EmptyTqdm: def __init__(self, *args, **kwargs): self._iterator = args[0] if args else None def __iter__(self): return iter(self._iterator) def __getattr__(self, _): def empty_fn(*args, **kwargs): return return empty_fn def __enter__(self): return self def __exit__(self, type_, value, traceback): return class _tqdm_cls: def __call__(self, *args, **kwargs): if _tqdm_active: return tqdm_lib.tqdm(*args, **kwargs) else: return EmptyTqdm(*args, **kwargs) def set_lock(self, *args, **kwargs): self._lock = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*args, **kwargs) def get_lock(self): if _tqdm_active: return tqdm_lib.tqdm.get_lock() tqdm = _tqdm_cls() def is_progress_bar_enabled() -> bool: global _tqdm_active return bool(_tqdm_active) def enable_progress_bar(): global _tqdm_active _tqdm_active = True hf_hub_utils.enable_progress_bars() def disable_progress_bar(): global _tqdm_active _tqdm_active = False hf_hub_utils.disable_progress_bars()
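A sketch of the remaining toggles from the same module: explicit formatting, progress bars, and warning capture. Effects are noted in the comments.

from transformers.utils import logging

logging.enable_explicit_format()   # "[LEVEL|file:line] time >> message" on every bound handler
logging.reset_format()             # back to the default formatter

logging.disable_progress_bar()     # logging.tqdm(...) now returns a no-op EmptyTqdm
for _ in logging.tqdm(range(3)):
    pass                           # iterates, but draws nothing
logging.enable_progress_bar()      # re-enables tqdm and huggingface_hub progress bars

logging.captureWarnings(True)      # route warnings.warn(...) through the "py.warnings" logger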
coding=utf-8. 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Model-parallelism device-map utilities: assert_device_map validates a proposed device_map (duplicate check, missing blocks); get_device_map returns a dictionary of layers distributed evenly across all devices, built as layers = list(range(n_layers)); n_blocks = int(ceil(n_layers / len(devices))); layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]; return dict(zip(devices, layers_list)).
from math import ceil def assert_device_map(device_map, num_blocks): blocks = list(range(0, num_blocks)) device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist] duplicate_blocks = [] for i in device_map_blocks: if device_map_blocks.count(i) > 1 and i not in duplicate_blocks: duplicate_blocks.append(i) missing_blocks = [i for i in blocks if i not in device_map_blocks] extra_blocks = [i for i in device_map_blocks if i not in blocks] if len(duplicate_blocks) != 0: raise ValueError( "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device." " These attention blocks were specified more than once: " + str(duplicate_blocks) ) if len(missing_blocks) != 0: raise ValueError( "There are attention blocks for this model that are not specified in the device_map. Add these attention " "blocks to a device on the device_map: " + str(missing_blocks) ) if len(extra_blocks) != 0: raise ValueError( "The device_map contains more attention blocks than this model has. Remove these from the device_map:" + str(extra_blocks) ) def get_device_map(n_layers, devices): layers = list(range(n_layers)) n_blocks = int(ceil(n_layers / len(devices))) layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)] return dict(zip(devices, layers_list))
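A quick sketch of how the two helpers above fit together, assuming 12 attention blocks split across three hypothetical GPU indices; the arithmetic in the comments follows the ceil-based chunking.

from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

device_map = get_device_map(n_layers=12, devices=[0, 1, 2])
# ceil(12 / 3) = 4 blocks per device:
# {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
assert_device_map(device_map, num_blocks=12)   # silent: no duplicate, missing, or extra blocks

get_device_map(n_layers=10, devices=[0, 1, 2, 3])
# ceil(10 / 4) = 3 -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9]} -- the last device gets the remainder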
2023 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license adaptercachedfilename none if modelid is none return none elif os path isdirmodelid listremotefiles os listdirmodelid if adapterconfigname in listremotefiles adaptercachedfilename os path joinmodelid adapterconfigname else adaptercachedfilename cachedfile modelid adapterconfigname cachedircachedir forcedownloadforcedownload resumedownloadresumedownload proxiesproxies tokentoken revisionrevision localfilesonlylocalfilesonly subfoldersubfolder commithashcommithash raiseexceptionsformissingentriesfalse raiseexceptionsforconnectionerrorsfalse return adaptercachedfilename def checkpeftversionminversion str none r checks if the version of peft is compatible args version str the version of peft to check against 2023 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license simply checks if the model stored on the hub or locally is an adapter model or not return the path of the adapter config file if it is none otherwise args model_id str the identifier of the model to look for can be either a local path or an id to the repository on the hub cache_dir str or os pathlike optional path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used force_download bool optional defaults to false whether or not to force to re download the configuration files and override the cached versions if they exist resume_download bool optional defaults to false whether or not to delete incompletely received file attempts to resume the download if such a file exists proxies dict str str optional a dictionary of proxy servers to use by protocol or endpoint e g http foo bar 3128 http hostname foo bar 4012 the proxies are used on each request token str or bool optional the token to use as http bearer ization for remote files if true will use the token generated when running huggingface cli login stored in huggingface revision str optional defaults to main the specific model version to use it can be a branch name a tag name or a commit id since we use a git based system for storing models and other artifacts on huggingface co so revision can be any identifier allowed by git tip to test a pull request you made on the hub you can pass revision refs pr pr_number tip local_files_only bool optional defaults to false if true will only try to load the tokenizer configuration from local files subfolder str optional defaults to in case the relevant files are located inside a subfolder of the model repo on huggingface co you can specify the folder name here checks if the version of peft is 
compatible. Args: version (str): the version of PEFT to check against.
import importlib import os from typing import Dict, Optional, Union from packaging import version from .hub import cached_file from .import_utils import is_peft_available ADAPTER_CONFIG_NAME = "adapter_config.json" ADAPTER_WEIGHTS_NAME = "adapter_model.bin" ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors" def find_adapter_config_file( model_id: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", _commit_hash: Optional[str] = None, ) -> Optional[str]: r adapter_cached_filename = None if model_id is None: return None elif os.path.isdir(model_id): list_remote_files = os.listdir(model_id) if ADAPTER_CONFIG_NAME in list_remote_files: adapter_cached_filename = os.path.join(model_id, ADAPTER_CONFIG_NAME) else: adapter_cached_filename = cached_file( model_id, ADAPTER_CONFIG_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _commit_hash=_commit_hash, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, ) return adapter_cached_filename def check_peft_version(min_version: str) -> None: r if not is_peft_available(): raise ValueError("PEFT is not installed. Please install it with `pip install peft`") is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) >= version.parse(min_version) if not is_peft_version_compatible: raise ValueError( f"The version of PEFT you are using is not compatible, please use a version that is greater" f" than {min_version}" )
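A hedged usage sketch of the two helpers above; the repository id is a placeholder, and find_adapter_config_file reaches the Hub (or a local directory) at call time.

from transformers.utils.peft_utils import check_peft_version, find_adapter_config_file

check_peft_version(min_version="0.5.0")   # raises ValueError if peft is missing or older than 0.5.0

# Returns the cached path of adapter_config.json when the repo or directory holds a PEFT
# adapter, or None otherwise. "some-user/some-adapter" is a made-up id for illustration.
adapter_config_path = find_adapter_config_file("some-user/some-adapter")
is_adapter_model = adapter_config_path is not None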
usrbinenv python codingutf8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license mixin class for quantization config instantiates a quantizationconfigmixin from a python dictionary of parameters args configdict dictstr any dictionary that will be used to instantiate the configuration object returnunusedkwargs bool optional defaults to false whether or not to return a list of unused keyword arguments used for frompretrained method in pretrainedmodel kwargs dictstr any additional parameters from which to initialize the configuration object returns quantizationconfigmixin the configuration object instantiated from those parameters save this instance to a json file args jsonfilepath str or os pathlike path to the json file in which this configuration instance s parameters will be saved usediff bool optional defaults to true if set to true only the difference between the config instance and the default quantizationconfig is serialized to json file serializes this instance to a python dictionary returns dictstr any dictionary of all the attributes that make up this configuration instance serializes this instance to a json string args usediff bool optional defaults to true if set to true only the difference between the config instance and the default pretrainedconfig is serialized to json string returns str string containing all the attributes that make up this configuration instance in json format this is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using bitsandbytes this replaces loadin8bit or loadin4bittherefore both options are mutually exclusive currently only supports llm int8 fp4 and nf4 quantization if more methods are added to bitsandbytes then more arguments will be added to this class args loadin8bit bool optional defaults to false this flag is used to enable 8bit quantization with llm int8 loadin4bit bool optional defaults to false this flag is used to enable 4bit quantization by replacing the linear layers with fp4nf4 layers from bitsandbytes llmint8threshold float optional defaults to 6 0 this corresponds to the outlier threshold for outlier detection as described in llm int8 8bit matrix multiplication for transformers at scale paper https arxiv orgabs2208 07339 any hidden states value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16 values are usually normally distributed that is most values are in the range 3 5 3 5 but there are some exceptional systematic outliers that are very differently distributed for large models these outliers are often in the interval 60 6 or 6 60 int8 quantization works well for values of magnitude 5 but beyond that there is a significant performance penalty a good default threshold is 6 but a lower threshold might be needed for more unstable models small models finetuning llmint8skipmodules liststr optional an explicit list of the modules that we do not want to convert in 8bit this is useful for models such as jukebox that has several heads in different 
places and not necessarily at the last position for example for causallm models the last lmhead is kept in its original dtype llmint8enablefp32cpuoffload bool optional defaults to false this flag is used for advanced use cases and users that are aware of this feature if you want to split your model in different parts and run some parts in int8 on gpu and some parts in fp32 on cpu you can use this flag this is useful for offloading large models such as googleflant5xxl note that the int8 operations will not be run on cpu llmint8hasfp16weight bool optional defaults to false this flag runs llm int8 with 16bit main weights this is useful for finetuning as the weights do not have to be converted back and forth for the backward pass bnb4bitcomputedtype torch dtype or str optional defaults to torch float32 this sets the computational type which might be different than the input time for example inputs might be fp32 but computation can be set to bf16 for speedups bnb4bitquanttype str optional defaults to fp4 this sets the quantization data type in the bnb nn linear4bit layers options are fp4 and nf4 data types which are specified by fp4 or nf4 bnb4bitusedoublequant bool optional defaults to false this flag is used for nested quantization where the quantization constants from the first quantization are quantized again kwargs dictstr any optional additional parameters from which to initialize the configuration object if not isinstanceself llmint8threshold float raise valueerrorllmint8threshold must be a float if self llmint8skipmodules is not none and not isinstanceself llmint8skipmodules list raise valueerrorllmint8skipmodules must be a list of strings if not isinstanceself llmint8enablefp32cpuoffload bool raise valueerrorllmint8enablefp32cpuoffload must be a boolean if not isinstanceself llmint8hasfp16weight bool raise valueerrorllmint8hasfp16weight must be a boolean if self bnb4bitcomputedtype is not none and not isinstanceself bnb4bitcomputedtype torch dtype raise valueerrorbnb4bitcomputedtype must be torch dtype if not isinstanceself bnb4bitquanttype str raise valueerrorbnb4bitquanttype must be a string if not isinstanceself bnb4bitusedoublequant bool raise valueerrorbnb4bitusedoublequant must be a boolean if self loadin4bit and not version parseimportlib metadata versionbitsandbytes version parse 0 39 0 raise valueerror 4 bit quantization requires bitsandbytes0 39 0 please upgrade your bitsandbytes version def isquantizableself r returns true if the model is quantizable false otherwise if self loadin8bit return llmint8 elif self loadin4bit and self bnb4bitquanttype fp4 return fp4 elif self loadin4bit and self bnb4bitquanttype nf4 return nf4 else return none def todictself dictstr any output copy deepcopyself dict outputbnb4bitcomputedtype stroutputbnb4bitcomputedtype split 1 return output def reprself configdict self todict return fself class name json dumpsconfigdict indent2 sortkeystruen def todiffdictself dictstr any configdict self todict get the default config dict defaultconfigdict bitsandbytesconfig todict serializableconfigdict only serialize values that differ from the default config for key value in configdict items if value defaultconfigdictkey serializableconfigdictkey value return serializableconfigdict class exllamaversionint enum one 1 two 2 dataclass class gptqconfigquantizationconfigmixin def init self bits int tokenizer any none dataset optionalunionliststr str none groupsize int 128 damppercent float 0 1 descact bool false sym bool true truesequential bool true usecudafp16 
bool false modelseqlen optionalint none blocknametoquantize optionalstr none modulenameprecedingfirstblock optionalliststr none batchsize int 1 padtokenid optionalint none useexllama optionalbool none maxinputlength optionalint none exllamaconfig optionaldictstr any none cacheblockoutputs bool true kwargs self quantmethod quantizationmethod gptq self bits bits self tokenizer tokenizer self dataset dataset self groupsize groupsize self damppercent damppercent self descact descact self sym sym self truesequential truesequential self usecudafp16 usecudafp16 self modelseqlen modelseqlen self blocknametoquantize blocknametoquantize self modulenameprecedingfirstblock modulenameprecedingfirstblock self batchsize batchsize self padtokenid padtokenid self useexllama useexllama self maxinputlength maxinputlength self exllamaconfig exllamaconfig self disableexllama kwargs popdisableexllama none self cacheblockoutputs cacheblockoutputs self postinit def getloadingattributesself attibutesdict copy deepcopyself dict loadingattibutes disableexllama useexllama exllamaconfig usecudafp16 maxinputlength loadingattibutesdict i j for i j in attibutesdict items if i in loadingattibutes return loadingattibutesdict def postinitself r safety checker that arguments are correct new default behaviour follow pattern of old config only happens if user explicitly passes in both arguments get compatible dict for optimum gptq config make it compatible with optimum config get compatible class with optimum gptq config dict switch to none to not trigger the warning this is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using autoawq library awq quantization relying on autoawq backend args bits int optional defaults to 4 the number of bits to quantize to groupsize int optional defaults to 128 the group size to use for quantization recommended value is 128 and 1 uses percolumn quantization zeropoint bool optional defaults to true whether to use zero point quantization version awqlinearversion optional defaults to awqlinearversion gemm the version of the quantization algorithm to use gemm is better for big batchsize e g 8 otherwise gemv is better e g 8 backend awqbackendpackingmethod optional defaults to awqbackendpackingmethod autoawq the quantization backend some models might be quantized using llmawq backend this is useful for users that quantize their own models using llmawq library if not torch cuda isavailable raise valueerrorawq is only available on gpu if self backend not in awqbackendpackingmethod autoawq awqbackendpackingmethod llmawq raise valueerror fonly supported quantization backends in awqbackendpackingmethod autoawq and awqbackendpackingmethod llmawq not recognized backend self backend self version awqlinearversion fromstrself version if self version not in awqlinearversion gemm awqlinearversion gemv raise valueerror fonly supported versions are in awqlinearversion gemm awqlinearversion gemv not recognized version self version if self backend awqbackendpackingmethod llmawq computecapability torch cuda getdevicecapability major minor computecapability if major 8 raise valueerrorllmawq backend is only supported on gpus with compute capability 8 0 usr bin env python coding utf 8 2023 the huggingface inc team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed 
to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license mixin class for quantization config instantiates a quantizationconfigmixin from a python dictionary of parameters args config_dict dict str any dictionary that will be used to instantiate the configuration object return_unused_kwargs bool optional defaults to false whether or not to return a list of unused keyword arguments used for from_pretrained method in pretrainedmodel kwargs dict str any additional parameters from which to initialize the configuration object returns quantizationconfigmixin the configuration object instantiated from those parameters save this instance to a json file args json_file_path str or os pathlike path to the json file in which this configuration instance s parameters will be saved use_diff bool optional defaults to true if set to true only the difference between the config instance and the default quantizationconfig is serialized to json file serializes this instance to a python dictionary returns dict str any dictionary of all the attributes that make up this configuration instance serializes this instance to a json string args use_diff bool optional defaults to true if set to true only the difference between the config instance and the default pretrainedconfig is serialized to json string returns str string containing all the attributes that make up this configuration instance in json format this is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using bitsandbytes this replaces load_in_8bit or load_in_4bit therefore both options are mutually exclusive currently only supports llm int8 fp4 and nf4 quantization if more methods are added to bitsandbytes then more arguments will be added to this class args load_in_8bit bool optional defaults to false this flag is used to enable 8 bit quantization with llm int8 load_in_4bit bool optional defaults to false this flag is used to enable 4 bit quantization by replacing the linear layers with fp4 nf4 layers from bitsandbytes llm_int8_threshold float optional defaults to 6 0 this corresponds to the outlier threshold for outlier detection as described in llm int8 8 bit matrix multiplication for transformers at scale paper https arxiv org abs 2208 07339 any hidden states value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16 values are usually normally distributed that is most values are in the range 3 5 3 5 but there are some exceptional systematic outliers that are very differently distributed for large models these outliers are often in the interval 60 6 or 6 60 int8 quantization works well for values of magnitude 5 but beyond that there is a significant performance penalty a good default threshold is 6 but a lower threshold might be needed for more unstable models small models fine tuning llm_int8_skip_modules list str optional an explicit list of the modules that we do not want to convert in 8 bit this is useful for models such as jukebox that has several heads in different places and not necessarily at the last position for example for causallm models the last lm_head is kept in its original dtype llm_int8_enable_fp32_cpu_offload bool optional defaults to false this flag is used for advanced use cases and users that are aware of this 
feature if you want to split your model in different parts and run some parts in int8 on gpu and some parts in fp32 on cpu you can use this flag this is useful for offloading large models such as google flan t5 xxl note that the int8 operations will not be run on cpu llm_int8_has_fp16_weight bool optional defaults to false this flag runs llm int8 with 16 bit main weights this is useful for fine tuning as the weights do not have to be converted back and forth for the backward pass bnb_4bit_compute_dtype torch dtype or str optional defaults to torch float32 this sets the computational type which might be different than the input time for example inputs might be fp32 but computation can be set to bf16 for speedups bnb_4bit_quant_type str optional defaults to fp4 this sets the quantization data type in the bnb nn linear4bit layers options are fp4 and nf4 data types which are specified by fp4 or nf4 bnb_4bit_use_double_quant bool optional defaults to false this flag is used for nested quantization where the quantization constants from the first quantization are quantized again kwargs dict str any optional additional parameters from which to initialize the configuration object safety checker that arguments are correct also replaces some nonetype arguments with their default values returns true if the model is quantizable false otherwise this method returns the quantization method used for the model if the model is not quantizable it returns none serializes this instance to a python dictionary returns dict str any dictionary of all the attributes that make up this configuration instance removes all attributes from config which correspond to the default config attributes for better readability and serializes to a python dictionary returns dict str any dictionary of all the attributes that make up this configuration instance get the default config dict only serialize values that differ from the default config this is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using optimum api for gptq quantization relying on auto_gptq backend args bits int the number of bits to quantize to supported numbers are 2 3 4 8 tokenizer str or pretrainedtokenizerbase optional the tokenizer used to process the dataset you can pass either a custom tokenizer object a string the model id of a predefined tokenizer hosted inside a model repo on huggingface co valid model ids can be located at the root level like bert base uncased or namespaced under a user or organization name like dbmdz bert base german cased a path to a directory containing vocabulary files required by the tokenizer for instance saved using the pretrainedtokenizer save_pretrained method e g my_model_directory dataset union list str optional the dataset used for quantization you can provide your own dataset in a list of string or just use the original datasets used in gptq paper wikitext2 c4 c4 new ptb ptb new group_size int optional defaults to 128 the group size to use for quantization recommended value is 128 and 1 uses per column quantization damp_percent float optional defaults to 0 1 the percent of the average hessian diagonal to use for dampening recommended value is 0 1 desc_act bool optional defaults to false whether to quantize columns in order of decreasing activation size setting it to false can significantly speed up inference but the perplexity may become slightly worse also known as act order sym bool optional defaults to true whether to use symetric quantization true_sequential 
bool optional defaults to true whether to perform sequential quantization even within a single transformer block instead of quantizing the entire block at once we perform layer wise quantization as a result each layer undergoes quantization using inputs that have passed through the previously quantized layers use_cuda_fp16 bool optional defaults to false whether or not to use optimized cuda kernel for fp16 model need to have model in fp16 model_seqlen int optional the maximum sequence length that the model can take block_name_to_quantize str optional the transformers block name to quantize module_name_preceding_first_block list str optional the layers that are preceding the first transformer block batch_size int optional defaults to 1 the batch size used when processing the dataset pad_token_id int optional the pad token id needed to prepare the dataset when batch_size 1 use_exllama bool optional whether to use exllama backend defaults to true if unset only works with bits 4 max_input_length int optional the maximum input length this is needed to initialize a buffer that depends on the maximum expected input length it is specific to the exllama backend with act order exllama_config dict str any optional the exllama config you can specify the version of the exllama kernel through the version key defaults to version 1 if unset cache_block_outputs bool optional defaults to true whether to cache block outputs to reuse as inputs for the succeeding block safety checker that arguments are correct you have entered a string value for dataset you can only choose between wikitext2 c4 c4 new ptb ptb new but we found self dataset dataset needs to be either a list of string or a value in wikitext2 c4 c4 new ptb ptb new but we found self dataset new default behaviour follow pattern of old config only happens if user explicitly passes in both arguments get compatible dict for optimum gptq config make it compatible with optimum config get compatible class with optimum gptq config dict switch to none to not trigger the warning this is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using auto awq library awq quantization relying on auto_awq backend args bits int optional defaults to 4 the number of bits to quantize to group_size int optional defaults to 128 the group size to use for quantization recommended value is 128 and 1 uses per column quantization zero_point bool optional defaults to true whether to use zero point quantization version awqlinearversion optional defaults to awqlinearversion gemm the version of the quantization algorithm to use gemm is better for big batch_size e g 8 otherwise gemv is better e g 8 backend awqbackendpackingmethod optional defaults to awqbackendpackingmethod autoawq the quantization backend some models might be quantized using llm awq backend this is useful for users that quantize their own models using llm awq library safety checker that arguments are correct
import copy import importlib.metadata import json import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, List, Optional, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch logger = logging.get_logger(__name__) class QuantizationMethod(str, Enum): BITS_AND_BYTES = "bitsandbytes" GPTQ = "gptq" AWQ = "awq" class AWQLinearVersion(str, Enum): GEMM = "gemm" GEMV = "gemv" @staticmethod def from_str(version: str): version = version.lower() if version == "gemm": return AWQLinearVersion.GEMM elif version == "gemv": return AWQLinearVersion.GEMV else: raise ValueError(f"Unknown AWQLinearVersion {version}") class AwqBackendPackingMethod(str, Enum): AUTOAWQ = "autoawq" LLMAWQ = "llm-awq" @dataclass class QuantizationConfigMixin: quant_method: QuantizationMethod @classmethod def from_dict(cls, config_dict, return_unused_kwargs=False, **kwargs): config = cls(**config_dict) to_remove = [] for key, value in kwargs.items(): if hasattr(config, key): setattr(config, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) if return_unused_kwargs: return config, kwargs else: return config def to_json_file(self, json_file_path: Union[str, os.PathLike]): with open(json_file_path, "w", encoding="utf-8") as writer: config_dict = self.to_dict() json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n" writer.write(json_string) def to_dict(self) -> Dict[str, Any]: return copy.deepcopy(self.__dict__) def __repr__(self): return f"{self.__class__.__name__} {self.to_json_string()}" def to_json_string(self, use_diff: bool = True) -> str: if use_diff is True: config_dict = self.to_diff_dict() else: config_dict = self.to_dict() return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" @dataclass class BitsAndBytesConfig(QuantizationConfigMixin): def __init__( self, load_in_8bit=False, load_in_4bit=False, llm_int8_threshold=6.0, llm_int8_skip_modules=None, llm_int8_enable_fp32_cpu_offload=False, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=None, bnb_4bit_quant_type="fp4", bnb_4bit_use_double_quant=False, **kwargs, ): self.quant_method = QuantizationMethod.BITS_AND_BYTES self.load_in_8bit = load_in_8bit self.load_in_4bit = load_in_4bit self.llm_int8_threshold = llm_int8_threshold self.llm_int8_skip_modules = llm_int8_skip_modules self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight self.bnb_4bit_quant_type = bnb_4bit_quant_type self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant if bnb_4bit_compute_dtype is None: self.bnb_4bit_compute_dtype = torch.float32 elif isinstance(bnb_4bit_compute_dtype, str): self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype) elif isinstance(bnb_4bit_compute_dtype, torch.dtype): self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") self.post_init() def post_init(self): r if not isinstance(self.llm_int8_threshold, float): raise ValueError("llm_int8_threshold must be a float") if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): raise ValueError("llm_int8_skip_modules must be a list of strings") if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") if not isinstance(self.llm_int8_has_fp16_weight, bool): raise 
ValueError("llm_int8_has_fp16_weight must be a boolean") if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") if not isinstance(self.bnb_4bit_quant_type, str): raise ValueError("bnb_4bit_quant_type must be a string") if not isinstance(self.bnb_4bit_use_double_quant, bool): raise ValueError("bnb_4bit_use_double_quant must be a boolean") if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def is_quantizable(self): r return self.load_in_8bit or self.load_in_4bit def quantization_method(self): r if self.load_in_8bit: return "llm_int8" elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4": return "fp4" elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4": return "nf4" else: return None def to_dict(self) -> Dict[str, Any]: output = copy.deepcopy(self.__dict__) output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1] return output def __repr__(self): config_dict = self.to_dict() return f"{self.__class__.__name__} {json.dumps(config_dict, indent=2, sort_keys=True)}\n" def to_diff_dict(self) -> Dict[str, Any]: config_dict = self.to_dict() default_config_dict = BitsAndBytesConfig().to_dict() serializable_config_dict = {} for key, value in config_dict.items(): if value != default_config_dict[key]: serializable_config_dict[key] = value return serializable_config_dict class ExllamaVersion(int, Enum): ONE = 1 TWO = 2 @dataclass class GPTQConfig(QuantizationConfigMixin): def __init__( self, bits: int, tokenizer: Any = None, dataset: Optional[Union[List[str], str]] = None, group_size: int = 128, damp_percent: float = 0.1, desc_act: bool = False, sym: bool = True, true_sequential: bool = True, use_cuda_fp16: bool = False, model_seqlen: Optional[int] = None, block_name_to_quantize: Optional[str] = None, module_name_preceding_first_block: Optional[List[str]] = None, batch_size: int = 1, pad_token_id: Optional[int] = None, use_exllama: Optional[bool] = None, max_input_length: Optional[int] = None, exllama_config: Optional[Dict[str, Any]] = None, cache_block_outputs: bool = True, **kwargs, ): self.quant_method = QuantizationMethod.GPTQ self.bits = bits self.tokenizer = tokenizer self.dataset = dataset self.group_size = group_size self.damp_percent = damp_percent self.desc_act = desc_act self.sym = sym self.true_sequential = true_sequential self.use_cuda_fp16 = use_cuda_fp16 self.model_seqlen = model_seqlen self.block_name_to_quantize = block_name_to_quantize self.module_name_preceding_first_block = module_name_preceding_first_block self.batch_size = batch_size self.pad_token_id = pad_token_id self.use_exllama = use_exllama self.max_input_length = max_input_length self.exllama_config = exllama_config self.disable_exllama = kwargs.pop("disable_exllama", None) self.cache_block_outputs = cache_block_outputs self.post_init() def get_loading_attributes(self): attibutes_dict = copy.deepcopy(self.__dict__) loading_attibutes = ["disable_exllama", "use_exllama", "exllama_config", "use_cuda_fp16", "max_input_length"] loading_attibutes_dict = {i: j for i, j in attibutes_dict.items() if i in loading_attibutes} return loading_attibutes_dict def post_init(self): r if self.bits not in [2, 3, 4, 8]: raise ValueError(f"Only support quantization to [2,3,4,8] bits but found {self.bits}") if 
self.group_size != -1 and self.group_size <= 0: raise ValueError("group_size must be greater than 0 or equal to -1") if not (0 < self.damp_percent < 1): raise ValueError("damp_percent must between 0 and 1.") if self.dataset is not None: if isinstance(self.dataset, str): if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: raise ValueError( f ) elif not isinstance(self.dataset, list): raise ValueError( f ) if self.disable_exllama is None and self.use_exllama is None: self.use_exllama = True elif self.disable_exllama is not None and self.use_exllama is None: logger.warning( "Using `disable_exllama` is deprecated and will be removed in version 4.37. Use `use_exllama` instead and specify the version with `exllama_config`." "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file." ) self.use_exllama = not self.disable_exllama self.disable_exllama = None elif self.disable_exllama is not None and self.use_exllama is not None: raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. Please use just `use_exllama`") if self.exllama_config is None: self.exllama_config = {"version": ExllamaVersion.ONE} else: if "version" not in self.exllama_config: raise ValueError("`exllama_config` needs to have a `version` key.") elif self.exllama_config["version"] not in [ExllamaVersion.ONE, ExllamaVersion.TWO]: exllama_version = self.exllama_config["version"] raise ValueError( f"Only supported versions are in [ExllamaVersion.ONE, ExllamaVersion.TWO] - not recognized version {exllama_version}" ) if self.bits == 4 and self.use_exllama: if self.exllama_config["version"] == ExllamaVersion.ONE: logger.info( "You have activated exllama backend. Note that you can get better inference " "speed using exllamav2 kernel by setting `exllama_config`." ) elif self.exllama_config["version"] == ExllamaVersion.TWO: optimum_version = version.parse(importlib.metadata.version("optimum")) autogptq_version = version.parse(importlib.metadata.version("auto_gptq")) if optimum_version <= version.parse("1.13.2") or autogptq_version <= version.parse("0.4.2"): raise ValueError( f"You need optimum > 1.13.2 and auto-gptq > 0.4.2 . 
Make sure to have that version installed - detected version : optimum {optimum_version} and autogptq {autogptq_version}" ) def to_dict(self): config_dict = super().to_dict() config_dict.pop("disable_exllama", None) return config_dict def to_dict_optimum(self): quant_dict = self.to_dict() quant_dict["disable_exllama"] = not self.use_exllama return quant_dict @classmethod def from_dict_optimum(cls, config_dict): if "disable_exllama" in config_dict: config_dict["use_exllama"] = not config_dict["disable_exllama"] config_dict["disable_exllama"] = None config = cls(**config_dict) return config @dataclass class AwqConfig(QuantizationConfigMixin): def __init__( self, bits: int = 4, group_size: int = 128, zero_point: bool = True, version: AWQLinearVersion = AWQLinearVersion.GEMM, backend: AwqBackendPackingMethod = AwqBackendPackingMethod.AUTOAWQ, **kwargs, ): self.quant_method = QuantizationMethod.AWQ self.bits = bits self.group_size = group_size self.zero_point = zero_point self.version = version self.backend = backend self.post_init() def post_init(self): r if not torch.cuda.is_available(): raise ValueError("AWQ is only available on GPU") if self.backend not in [AwqBackendPackingMethod.AUTOAWQ, AwqBackendPackingMethod.LLMAWQ]: raise ValueError( f"Only supported quantization backends in {AwqBackendPackingMethod.AUTOAWQ} and {AwqBackendPackingMethod.LLMAWQ} - not recognized backend {self.backend}" ) self.version = AWQLinearVersion.from_str(self.version) if self.version not in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV]: raise ValueError( f"Only supported versions are in [AWQLinearVersion.GEMM, AWQLinearVersion.GEMV] - not recognized version {self.version}" ) if self.backend == AwqBackendPackingMethod.LLMAWQ: compute_capability = torch.cuda.get_device_capability() major, minor = compute_capability if major < 8: raise ValueError("LLM-AWQ backend is only supported on GPUs with compute capability >= 8.0")
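A minimal usage sketch of the config classes defined above, assuming bitsandbytes (and, for the GPTQ path, optimum plus auto-gptq) are installed; the facebook/opt-350m model id is only an illustrative placeholder.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, GPTQConfig

# 4-bit NF4 loading through bitsandbytes
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
)
model_4bit = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", quantization_config=bnb_config  # placeholder model id
)

# GPTQ quantization calibrated on the "c4" dataset (needs optimum and auto-gptq)
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer="facebook/opt-350m")
model_gptq = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", device_map="auto", quantization_config=gptq_config
)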
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license utilities for working with package versions perform a runtime check of the dependency versions using the exact same syntax used by pip the installed module version comes from the sitepackages dir via importlib metadata args requirement str pip style definition e g tokenizers0 9 4 tqdm4 27 numpy hint str optional what suggestion to print in case of requirements not being met example python requireversionpandas1 1 2 requireversionnumpy1 18 5 this is important to have for whatever reason hint fnhint if hint is not none else nonversioned check if re matchrwd requirement pkg op wantver requirement none none else match re findallr ss 1 2 requirement if not match raise valueerror requirement needs to be in the pip package format e g packagea1 23 or packageb1 23 but f got requirement pkg wantfull match0 wantrange wantfull split there could be multiple requirements wanted for w in wantrange match re findallrs 1 2 w if not match raise valueerror requirement needs to be in the pip package format e g packagea1 23 or packageb1 23 f but got requirement op wantver match0 wantedop wantver if op not in ops raise valueerrorfrequirement need one of listops keys but got op special case if pkg python gotver joinstrx for x in sys versioninfo 3 for op wantver in wanted items compareversionsop gotver wantver requirement pkg hint return check if any version is installed try gotver importlib metadata versionpkg except importlib metadata packagenotfounderror raise importlib metadata packagenotfounderror fthe requirement distribution was not found and is required by this application hint check that the right version is installed if version number or a range was provided if wantver is not none for op wantver in wanted items compareversionsop gotver wantver requirement pkg hint def requireversioncorerequirement 2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license utilities for working with package versions perform a runtime check of the dependency versions using the exact same syntax used by pip the installed module version comes from the site packages dir via importlib metadata args requirement str pip style definition e g tokenizers 0 9 4 tqdm 4 27 numpy hint str optional what suggestion to print in case of requirements not being met example python require_version pandas 1 1 2 require_version numpy 1 18 5 this is important to have for whatever reason non versioned check there could be multiple requirements special case check if any version is installed check that the right version is installed if version number or a range was provided 
a require_version wrapper that emits a core specific install hint when the requirement is not met
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version ops = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): if got_ver is None or want_ver is None: raise ValueError( f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" f" reinstalling {pkg}." ) if not ops[op](version.parse(got_ver), version.parse(want_ver)): raise ImportError( f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def require_version(requirement: str, hint: Optional[str] = None) -> None: hint = f"\n{hint}" if hint is not None else "" if re.match(r"^[\w_\-\d]+$", requirement): pkg, op, want_ver = requirement, None, None else: match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" f" got {requirement}" ) pkg, want_full = match[0] want_range = want_full.split(",") wanted = {} for w in want_range: match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," f" but got {requirement}" ) op, want_ver = match[0] wanted[op] = want_ver if op not in ops: raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") if pkg == "python": got_ver = ".".join([str(x) for x in sys.version_info[:3]]) for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) return try: got_ver = importlib.metadata.version(pkg) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"The '{requirement}' distribution was not found and is required by this application. {hint}" ) if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) def require_version_core(requirement): hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(requirement, hint)
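A short usage sketch of the helpers above; the requirement strings are illustrative.

from transformers.utils.versions import require_version, require_version_core

require_version("numpy>=1.18.5", "numpy is needed for this example")
require_version("python>=3.8")              # "python" is special-cased against sys.version_info
require_version_core("tokenizers>=0.9.4")   # failure message carries the transformers install hint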
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license set architectures equal to none
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class BenchmarkTest(unittest.TestCase): def check_results_dict_not_empty(self, results): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]): result = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(result) def test_inference_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_only_pretrain(self): MODEL_ID = "sgugger/tiny-distilbert-classification" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_torchscript(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_inference_fp16(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_model_no_architectures(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) config.architectures = None benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_train_no_configs_fp16(self): MODEL_ID = 
"sshleifer/tiny-gpt2" benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_inference_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_encoder_decoder_with_configs(self): MODEL_ID = "sshleifer/tinier_bart" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_train_encoder_decoder_with_configs(self): MODEL_ID = "sshleifer/tinier_bart" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_save_csv_files(self): MODEL_ID = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) benchmark.run() self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists()) def test_trace_memory(self): MODEL_ID = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(summary): self.assertTrue(hasattr(summary, "sequential")) 
self.assertTrue(hasattr(summary, "cumulative")) self.assertTrue(hasattr(summary, "current")) self.assertTrue(hasattr(summary, "total")) with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = PyTorchBenchmarkArguments( models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, ) benchmark = PyTorchBenchmark(benchmark_args) result = benchmark.run() _check_summary_is_not_empty(result.inference_summary) _check_summary_is_not_empty(result.train_summary) self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
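A stand-alone sketch of the benchmark API the tests above exercise, reusing the same tiny model id the tests rely on.

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)
print(results.memory_inference_result)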
2020 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class TFBenchmarkTest(unittest.TestCase): def check_results_dict_not_empty(self, results): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]): result = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(result) def test_inference_no_configs_eager(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_only_pretrain(self): MODEL_ID = "sgugger/tiny-distilbert-classification" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_no_configs_graph(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_with_configs_eager(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_inference_with_configs_graph(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_train_no_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_train_with_configs(self): MODEL_ID = "sshleifer/tiny-gpt2" config = AutoConfig.from_pretrained(MODEL_ID) 
benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, [config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def test_inference_encoder_decoder_with_configs(self): MODEL_ID = "patrickvonplaten/t5-tiny-random" config = AutoConfig.from_pretrained(MODEL_ID) benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args, configs=[config]) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.") def test_inference_no_configs_xla(self): MODEL_ID = "sshleifer/tiny-gpt2" benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) results = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def test_save_csv_files(self): MODEL_ID = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) benchmark.run() self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists()) self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists()) def test_trace_memory(self): MODEL_ID = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(summary): self.assertTrue(hasattr(summary, "sequential")) self.assertTrue(hasattr(summary, "cumulative")) self.assertTrue(hasattr(summary, "current")) self.assertTrue(hasattr(summary, "total")) with tempfile.TemporaryDirectory() as tmp_dir: benchmark_args = TensorFlowBenchmarkArguments( models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False, ) benchmark = TensorFlowBenchmark(benchmark_args) result = benchmark.run() _check_summary_is_not_empty(result.inference_summary) self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
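The TensorFlow side follows the same pattern; a brief sketch mirroring the eager-mode test above.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    eager_mode=True,
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)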
codingutf8 2023 the huggingface team inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license refer to the full test suite in optimum library https github comhuggingfaceoptimumtreemaintestsbettertransformer modelid hfinternaltestingtinyrandomt5 tokenizer autotokenizer frompretrainedmodelid model automodelforseq2seqlm frompretrainedmodelid inp tokenizerthis is me returntensorspt model model tobettertransformer self asserttrueanybettertransformer in mod class name for mod in model namedmodules output model generateinp model model reversebettertransformer self assertfalseanybettertransformer in mod class name for mod in model namedmodules with tempfile temporarydirectory as tmpdirname model savepretrainedtmpdirname modelreloaded automodelforseq2seqlm frompretrainedtmpdirname self assertfalse anybettertransformer in mod class name for mod in modelreloaded namedmodules outputfrompretrained modelreloaded generateinp self asserttruetorch allcloseoutput outputfrompretrained def testerrorsavepretrainedself r the savepretrained method should raise a valueerror if the model is in bettertransformer mode all should be good if the model is reversed coding utf 8 2023 the huggingface team inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license refer to the full test suite in optimum library https github com huggingface optimum tree main tests bettertransformer classic tests to simply check if the conversion has been successfull the save_pretrained method should raise a valueerror if the model is in bettertransformer mode all should be good if the model is reversed
import tempfile import unittest from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class BetterTransformerIntegrationTest(unittest.TestCase): def test_transform_and_reverse(self): r model_id = "hf-internal-testing/tiny-random-t5" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForSeq2SeqLM.from_pretrained(model_id) inp = tokenizer("This is me", return_tensors="pt") model = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) output = model.generate(**inp) model = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()) ) output_from_pretrained = model_reloaded.generate(**inp) self.assertTrue(torch.allclose(output, output_from_pretrained)) def test_error_save_pretrained(self): r model_id = "hf-internal-testing/tiny-random-t5" model = AutoModelForSeq2SeqLM.from_pretrained(model_id) model = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(ValueError): model.save_pretrained(tmpdirname) model = model.reverse_bettertransformer() model.save_pretrained(tmpdirname)
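A minimal round-trip sketch of the conversion the integration test checks; the output directory is a placeholder.

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swaps in the optimized modules (requires optimum)
# ... run generation or inference here ...
model = model.reverse_bettertransformer()   # restore the canonical modules before saving
model.save_pretrained("./t5-checkpoint")    # saving while still converted would raise a ValueError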
2023 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache orglicenseslicense2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license default torch distributed port when using a single gpu launcher emulation i e not deepspeed or python m torch distributed the issue is that once the port is tied it can t be used anywhere else outside of this process since torch dist doesn t free the port until the process exits therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports this function will give the right port in the right context for real launcher it ll give the base port for emulated launcher it ll give the base port 1 in both cases a string is returned args reallauncher whether a real launcher is going to be used or the emulated one hack to restore original logging level pre 21700 1 explicitly set numnodes1 just in case these tests end up run on a multinode setup it won t be able to handle that 2 for now testing with just 2 gpus max since some quality tests may give different results with mode gpus because we use very little data customize the test name generator function as we want both params to appear in the subtest name as by default it shows only the first param resume from ckpt split cmd launcher fsdpconfig script args keep for quick debug print joinfnpythonpathself srcdirstr cmd die executesubprocessasynccmd envself getenv logs trainerstate loadfromjsonos path joinoutputdir trainerstate json loghistory return logs def getbaseargsself outputdir numepochs loggingsteps return f modelnameorpath bertbasecased taskname mrpc outputdir outputdir overwriteoutputdir dotrain maxseqlength 128 perdevicetrainbatchsize 16 learningrate 5e5 numtrainepochs numepochs lrschedulertype cosine loggingsteps loggingsteps savestrategy epoch doeval evaluationstrategy epoch reportto none 2023 the huggingface team licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license noqa default torch distributed port when using a single gpu launcher emulation i e not deepspeed or python m torch distributed the issue is that once the port is tied it can t be used anywhere else outside of this process since torch dist doesn t free the port until the process exits therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports this function will give the right port in the right context for real launcher it ll give the base port for emulated launcher it ll give the base port 1 in both cases a string is returned args real_launcher whether a real launcher is going to be used or the emulated one noqa hack to restore original logging level pre 21700 1 explicitly set num_nodes 1 just in case these 
tests end up run on a multi node setup it won t be able to handle that 2 for now testing with just 2 gpus max since some quality tests may give different results with mode gpus because we use very little data accelerate launch num_processes num_gpus main_process_port master_port use_fsdp fsdp_auto_wrap_policy transformer_based_wrap fsdp_state_dict_type sharded_state_dict fsdp_transformer_layer_cls_to_wrap bertlayer customize the test name generator function as we want both params to appear in the sub test name as by default it shows only the first param resume from ckpt fsdp_sharding_strategy fsdp_sharding_strategy index sharding_strategy upper 1 keep for quick debug print join f npythonpath self src_dir_str cmd die model_name_or_path bert base cased task_name mrpc output_dir output_dir overwrite_output_dir do_train max_seq_length 128 per_device_train_batch_size 16 learning_rate 5e 5 num_train_epochs num_epochs lr_scheduler_type cosine logging_steps logging_steps save_strategy epoch do_eval evaluation_strategy epoch report_to none
import itertools import os import unittest from functools import partial from parameterized import parameterized import tests.trainer.test_trainer from tests.trainer.test_trainer import TrainerIntegrationCommon from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_accelerate, require_fsdp, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import FSDPOption, set_seed from transformers.utils import is_accelerate_available, is_torch_bf16_available_on_device if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_1 else: is_torch_greater_or_equal_than_2_1 = False DEFAULT_MASTER_PORT = "10999" dtypes = ["fp16"] if is_torch_bf16_available_on_device(torch_device): dtypes += ["bf16"] sharding_strategies = ["full_shard", "shard_grad_op"] state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"] set_seed(42) params = list(itertools.product(sharding_strategies, dtypes)) def get_master_port(real_launcher=False): master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base if is_torch_available(): from tests.trainer.test_trainer import ( RegressionModelConfig, RegressionPreTrainedModel, ) get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") require_fsdp_version = require_fsdp if is_accelerate_available(): from accelerate.utils.constants import ( FSDP_PYTORCH_VERSION, FSDP_SHARDING_STRATEGY, ) require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION) def get_launcher(distributed=False, use_accelerate=False): num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) if use_accelerate: return f.split() return f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split() def _parameterized_custom_name_func(func, param_num, param): param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" @require_accelerate @require_torch_accelerator @require_fsdp_version class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.fsdp_config = { "backward_prefetch": "backward_pre", "forward_prefetch": "False", "limit_all_gathers": "False", "use_orig_params": "True", "sync_module_states": "True", "activation_checkpointing": "False", "min_num_params": 1, } def tearDown(self): super().tearDown() @parameterized.expand(params, name_func=_parameterized_custom_name_func) def test_fsdp_config(self, sharding_strategy, dtype): output_dir = self.get_auto_remove_tmp_dir() kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "fsdp": f"{sharding_strategy} offload auto_wrap", "fsdp_config": self.fsdp_config, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) self.assertEqual(trainer.args.fsdp[0], sharding_strategy) self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD) 
self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP) for k, v in trainer.args.fsdp_config.items(): self.assertEqual(v, self.fsdp_config[k]) self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true") @parameterized.expand(params, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_basic_run(self, sharding_strategy, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}"] fsdp_args = ["--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(dtypes) @require_torch_multi_accelerator @slow @unittest.skipIf(not is_torch_greater_or_equal_than_2_1, reason="This test on pytorch 2.0 takes 4 hours.") def test_basic_run_with_cpu_offload(self, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}", "--max_steps", "10"] fsdp_args = ["--fsdp", "full_shard auto_wrap offload", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_training_and_can_resume_normally(self, state_dict_type): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) sharding_strategy = "full_shard" use_accelerate = state_dict_type == "SHARDED_STATE_DICT" launcher = get_launcher(True, use_accelerate=use_accelerate) args = self.get_base_args(output_dir, 2, 25).split() script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] logs = self.run_cmd_and_get_logs(use_accelerate, sharding_strategy, launcher, script, args, output_dir) checkpoint = os.path.join(output_dir, "checkpoint-115") resume_args = args + f"--resume_from_checkpoint {checkpoint}".split() logs_resume = self.run_cmd_and_get_logs( use_accelerate, sharding_strategy, launcher, script, resume_args, output_dir ) for log, log1 in zip(logs, logs_resume): if "learning_rate" in log: self.assertAlmostEqual(log["learning_rate"], log1["learning_rate"], delta=1e-5) def run_cmd_and_get_logs(self, use_accelerate, sharding_strategy, launcher, script, args, output_dir): if not use_accelerate: fsdp_args = [ "--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer", ] cmd = launcher + script + args + fsdp_args else: fsdp_config = f.split() cmd = launcher + fsdp_config + script + args execute_subprocess_async(cmd, env=self.get_env()) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history return logs def get_base_args(self, output_dir, num_epochs, logging_steps): return f
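A hedged sketch of enabling the same FSDP options from Python rather than through the CLI flags used in the tests; the output directory is a placeholder and the config keys mirror the ones in self.fsdp_config above.

from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./fsdp-run",                 # placeholder
    fsdp="full_shard offload auto_wrap",
    fsdp_config={
        "backward_prefetch": "backward_pre",
        "use_orig_params": "True",
        "sync_module_states": "True",
        "min_num_params": 1,
    },
    per_device_train_batch_size=16,
    bf16=True,                               # or fp16=True depending on hardware
)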
coding utf 8 2020 the huggingface team inc licensed under the apache license version 2 0 the license you may not use this file except in compliance with the license you may obtain a copy of the license at http www apache org licenses license 2 0 unless required by applicable law or agreed to in writing software distributed under the license is distributed on an as is basis without warranties or conditions of any kind either express or implied see the license for the specific language governing permissions and limitations under the license for consistency across different places the disjunctiveconstraint is called dc token_ids is a list of integers it is also initialized only by integers we can t have constraints that are complete subsets of another this leads to a perverse interpretation of constraint fulfillment does generating 1 2 3 fulfill the constraint it would mean that it generated 1 2 which fulfills it but it s in the middle of potentially fulfilling 1 2 3 4 if we believe that 1 2 3 does fulfill the constraint then the algorithm will necessarily never reach 1 2 3 4 giving users a false sense of control better to just not allow it fails here completed completed completed
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class ConstraintTest(unittest.TestCase): def test_input_types(self): cset = [[1, 2, 4], [1, 2, 3, 4]] dc = DisjunctiveConstraint(cset) self.assertTrue(isinstance(dc.token_ids, list)) with self.assertRaises(ValueError): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]])) with self.assertRaises(ValueError): DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])]) def test_check_illegal_input(self): cset = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(ValueError): DisjunctiveConstraint(cset) def test_example_progression(self): cset = [[1, 2, 3], [1, 2, 4]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) desired = stepped is True and completed is False and reset is False self.assertTrue(desired) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(3) desired = stepped is True and completed is True and reset is False self.assertTrue(desired) self.assertTrue(dc.completed) self.assertTrue(dc.current_seq == [1, 2, 3]) def test_example_progression_unequal_three_mid_and_reset(self): cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] dc = DisjunctiveConstraint(cset) stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(4) self.assertTrue(not dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) self.assertTrue(dc.current_seq == [1, 2, 4, 5]) dc.reset() stepped, completed, reset = dc.update(1) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 3) self.assertTrue(dc.current_seq == [1]) stepped, completed, reset = dc.update(2) self.assertTrue(not dc.completed) self.assertTrue(dc.remaining() == 2) self.assertTrue(dc.current_seq == [1, 2]) stepped, completed, reset = dc.update(5) self.assertTrue(dc.completed) self.assertTrue(dc.remaining() == 0) self.assertTrue(dc.current_seq == [1, 2, 5])
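A hedged end-to-end sketch of what the constraint is for: forcing generate() to emit one of several alternative token sequences. The gpt2 checkpoint and force words are purely illustrative.

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# either " rain" or " raining" must appear somewhere in the generated text
flexible_ids = tokenizer([" rain", " raining"], add_special_tokens=False).input_ids
constraint = DisjunctiveConstraint(flexible_ids)

inputs = tokenizer("The weather today is", return_tensors="pt")
output = model.generate(
    **inputs, constraints=[constraint], num_beams=4, max_new_tokens=20
)
print(tokenizer.decode(output[0], skip_special_tokens=True))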
coding=utf-8. Copyright 2020 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Cannot be randomly generated. Check that the correct number of beam hypotheses is set in the beam scorer; check the correct type; check that num_beams is correctly set. Check for early stopping deactivated (if early stopping is True, the score does not matter). Re-init, then add num_beams + 1 beams to change worst_score: -10.0 is removed and -9.0 becomes the worst score; -5.0 is better than the worst score, so it should not be finished; -20.0 is worse than the worst score, so it should be finished. Check too many eos tokens. Check all batches are done (beam scorer should be done). Check all outputs: cut out the id of the eos token and take the best num_beams outputs, adding num_beams * batch_idx; make sure ids of the eos token are correctly saved in the beam_hyps of the beam scorer. max_length should be only one more than the current input_ids to check that eos is correctly appended. Update beams and append to input_ids. The first batch's first output has to finish with the eos token id; since scores are correctly sorted, make sure the corresponding score is as good as possible so it is surely picked first. Finalize: since num_beam_hyps_to_keep == 1, only return batch_size x max_length; check sequence_scores; the first batch has to finish with eos_token, other batches cannot finish with the eos token. Then test that if num_beam_hyps_to_keep is 3, all beams are returned.

The same checks are repeated for the constrained beam scorer, with these additions: for testing finalize we do want to have fulfilled constraints; test that the constraint is indeed fulfilled (including the disjunctive case); set constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams; and a helper that checks whether tensor_1 is inside tensor_2 or tensor_2 is inside tensor_1 (both are moved to the same device, we don't care which device).
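The worst_score bookkeeping referred to above is easiest to see on a single BeamHypotheses instance. The sketch below mirrors the calls made in the tests that follow (it reaches into the internal _beam_hyps list the same way the tests do; the scores, lengths, and device are illustrative):

import torch
from transformers.generation import BeamSearchScorer

num_beams, seq_len, length_penalty = 4, 10, 2.0
scorer = BeamSearchScorer(
    batch_size=1, num_beams=num_beams, device="cpu",
    length_penalty=length_penalty, do_early_stopping=False,
)
beam_hyp = scorer._beam_hyps[0]  # one BeamHypotheses container per batch element

# Add num_beams + 1 hypotheses with sum-logprobs -10, -9, ..., -6: once the
# container holds num_beams hypotheses, the worst one (-10) is evicted and -9
# becomes the worst kept score (normalized by length ** length_penalty).
for i in range(num_beams + 1):
    beam_hyp.add(torch.zeros(seq_len, dtype=torch.long), -10.0 + float(i))

print(beam_hyp.worst_score)              # -9.0 / seq_len ** length_penalty
print(beam_hyp.is_done(-5.0, seq_len))   # False: a -5.0 candidate could still improve the beams
print(beam_hyp.is_done(-20.0, seq_len))  # True: no remaining candidate can beat the worst beam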
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers.generation import ( BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, PhrasalConstraint, ) class BeamSearchTester: def __init__( self, parent, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.eos_token_id = vocab_size + 1 def prepare_beam_scorer(self, **kwargs): return BeamSearchScorer( batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores) def check_beam_hypotheses(self, input_ids, *args): beam_scorer = self.prepare_beam_scorer(do_early_stopping=True) beam_hyp = beam_scorer._beam_hyps[0] self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size) self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) beam_scorer = self.prepare_beam_scorer(do_early_stopping=False) beam_hyp = beam_scorer._beam_hyps[0] for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores): beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id) beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) 
self.parent.assertTrue(beam_scorer.is_done) beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) expected_beam_indices = list(range(10)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores): max_length = self.sequence_length + 1 beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False) tokens = next_tokens.clone() tokens[0, 0] = self.eos_token_id next_scores[0, 0] = 0.0 beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) self.parent.assertFalse((sequence_scores > 0).any().item()) self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) beam_scorer.num_beam_hyps_to_keep = self.num_beams sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * 
self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) class ConstrainedBeamSearchTester: def __init__( self, parent, constraints=None, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep if constraints is None: force_tokens = torch.randint(10, 50, (1, 2))[0].tolist() disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist() constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)] self.constraints = constraints self.eos_token_id = vocab_size + 1 def prepare_constrained_beam_scorer(self, **kwargs): return ConstrainedBeamSearchScorer( constraints=kwargs.get("constraints", self.constraints), batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = 
stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) self.parent.assertTrue(constrained_beam_scorer.is_done) constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) offset = torch.div( torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams, rounding_mode="floor" ) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) def check_constrained_beam_scorer_finalize( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): max_length = self.sequence_length + 1 stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False ) constraints = constrained_beam_scorer.constraints tokens = next_tokens.clone() tokens[0, 0] = self.eos_token_id next_scores[0, 0] = 0.0 beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] 
sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) self.parent.assertFalse((sequence_scores > 0).any().item()) self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
coding=utf-8. Copyright 2022 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Checks parameters that were specified and parameters that were not specified (defaults). The generation config has loaded a few non-default parameters from the model config; one of those parameters is eos_token_id, so check that it matches. update_kwargs is not modified (no side effects); update_kwargs is used to update the config on valid attributes; update returns a dictionary of unused kwargs. No new kwargs should be initialized if built from a config. Tests that we can overwrite attributes at from_pretrained time, while unrelated attributes keep their default value. Tests that the validate method works as expected; note that validate is called at initialization time. Case 1: a correct configuration does not throw any warning. Case 2: an inconsequential but technically wrong configuration throws a warning, e.g. setting sampling parameters with do_sample=False (this may be escalated to an error in the future). Case 3: impossible sets of constraints/parameters raise an exception. Case 4: passing generate-only flags to validate raises an exception. Case 5: model-specific parameters do not raise an exception or a warning. Tests that we refuse to save a generation config that fails validation: setting the temperature alone is invalid, since we also need to set do_sample to True, so the warning is caught, nothing is saved, and a warning is raised; greedy decoding throws an exception if we try to return multiple sequences, so the exception is caught, nothing is saved, and a warning is raised; final check: no warnings are thrown if the config is correct, and the file is saved. Reset repo; push to hub via save_pretrained (repeated for the organization repo).
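As a compact illustration of the load-time override and validation behaviour summarized above, the sketch below mirrors the assertions made in the test file that follows (the temperature and num_return_sequences values are just examples):

import tempfile
import warnings

from transformers import GenerationConfig

# Saved with do_sample=True / temperature=0.7, then overridden at load time.
config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
print(loaded.temperature, loaded.do_sample)  # 1.0 True

# validate() runs at init time: sampling parameters with do_sample=False only warn ...
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    GenerationConfig(temperature=0.5)
print(len(caught))  # 1

# ... while an impossible combination (multiple return sequences with greedy decoding) raises.
try:
    GenerationConfig(num_return_sequences=2)
except ValueError:
    print("rejected")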
import copy import os import tempfile import unittest import warnings from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class GenerationConfigTest(unittest.TestCase): @parameterized.expand([(None,), ("foo.json",)]) def test_save_load_config(self, config_name): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, config_name=config_name) loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name) self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.temperature, 0.7) self.assertEqual(loaded_config.length_penalty, 1.0) self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]]) self.assertEqual(loaded_config.top_k, 50) self.assertEqual(loaded_config.max_length, 20) self.assertEqual(loaded_config.max_time, None) def test_from_model_config(self): model_config = AutoConfig.from_pretrained("gpt2") generation_config_from_model = GenerationConfig.from_model_config(model_config) default_generation_config = GenerationConfig() self.assertNotEqual(generation_config_from_model, default_generation_config) self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id) self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id) def test_update(self): generation_config = GenerationConfig() update_kwargs = { "max_new_tokens": 1024, "foo": "bar", } update_kwargs_copy = copy.deepcopy(update_kwargs) unused_kwargs = generation_config.update(**update_kwargs) self.assertEqual(update_kwargs, update_kwargs_copy) self.assertEqual(generation_config.max_new_tokens, 1024) self.assertEqual(unused_kwargs, {"foo": "bar"}) def test_initialize_new_kwargs(self): generation_config = GenerationConfig() generation_config.foo = "bar" with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir: generation_config.save_pretrained(tmp_dir) new_config = GenerationConfig.from_pretrained(tmp_dir) self.assertEqual(new_config.foo, "bar") generation_config = GenerationConfig.from_model_config(new_config) assert not hasattr(generation_config, "foo") def test_kwarg_init(self): default_config = GenerationConfig() self.assertEqual(default_config.temperature, 1.0) self.assertEqual(default_config.do_sample, False) self.assertEqual(default_config.num_beams, 1) config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], ) self.assertEqual(config.temperature, 0.7) self.assertEqual(config.do_sample, True) self.assertEqual(config.num_beams, 1) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir) loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0) self.assertEqual(loaded_config.temperature, 1.0) self.assertEqual(loaded_config.do_sample, True) self.assertEqual(loaded_config.num_beams, 1) def test_validate(self): with warnings.catch_warnings(record=True) as captured_warnings: GenerationConfig() self.assertEqual(len(captured_warnings), 0) with warnings.catch_warnings(record=True) as captured_warnings: GenerationConfig(temperature=0.5) self.assertEqual(len(captured_warnings), 1) with self.assertRaises(ValueError): GenerationConfig(num_return_sequences=2) with 
self.assertRaises(ValueError): GenerationConfig(logits_processor="foo") with warnings.catch_warnings(record=True) as captured_warnings: GenerationConfig(foo="bar") self.assertEqual(len(captured_warnings), 0) def test_refuse_to_save(self): config = GenerationConfig() config.temperature = 0.5 with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 1) self.assertTrue("Fix these issues to save the configuration." in str(captured_warnings[0].message)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) config = GenerationConfig() config.num_return_sequences = 2 with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 1) self.assertTrue("Fix these issues to save the configuration." in str(captured_warnings[0].message)) self.assertTrue(len(os.listdir(tmp_dir)) == 0) config = GenerationConfig() with tempfile.TemporaryDirectory() as tmp_dir: with warnings.catch_warnings(record=True) as captured_warnings: config.save_pretrained(tmp_dir) self.assertEqual(len(captured_warnings), 0) self.assertTrue(len(os.listdir(tmp_dir)) == 1) @is_staging_test class ConfigPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-generation-config") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org") except HTTPError: pass def test_push_to_hub(self): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub("test-generation-config", token=self._token) new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) delete_repo(token=self._token, repo_id="test-generation-config") with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir, repo_id="test-generation-config", push_to_hub=True, token=self._token) new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) def test_push_to_hub_in_organization(self): config = GenerationConfig( do_sample=True, temperature=0.7, length_penalty=1.0, ) config.push_to_hub("valid_org/test-generation-config-org", token=self._token) new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org") with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, token=self._token ) new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org") for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k))
coding=utf-8. Copyright 2021 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Tweak scores so they are not uniform anymore (a peak and a valley in batch index 1), then compute the softmax: the uniform distribution stays uniform; with the sharper warper, peaks get higher and valleys get lower; with the smoother warper, peaks get lower and valleys get higher. Create a ramp distribution and check that the correct tokens are filtered; check the special case where min_tokens_to_keep overwrites k (3 tokens are kept, 2 tokens are nullified). Create a distribution and take its log (the inverse of the softmax taken in TopPLogitsWarper); the distribution should be filtered to keep the minimum number of values whose sum reaches top_p, and exp(-inf) = 0. Check edge cases with negative and extreme logits: make ramp_logits more extreme and make sure at least 2 tokens are kept; the first batch should keep three tokens, while the second batch would keep only 1 but keeps 2 due to min_tokens_to_keep=2. Check that min length is applied at length 5 and is not applied anymore at length 15. Check that all scores are -inf except the bos_token_id score, which should be zero; check that bos_token_id is not forced if the current length is greater than 1. Check that all scores are -inf except the eos_token_id score when max_length is reached, which should be zero; check that eos_token_id is not forced if max_length is not reached. Finally, with dummy input_ids and scores, instantiate all dist processors and all logits processors, run them with and without a processor list, and check that the scores are equal and input_ids are never changed (both eagerly and jitted).
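As a small, self-contained illustration of the top-k filtering described above, the sketch below reproduces the ramp-distribution trick used in the tests that follow (it needs a Flax installation; batch and vocab sizes are illustrative):

import numpy as np
import jax.numpy as jnp

from transformers.generation import FlaxTopKLogitsWarper

batch_size, vocab_size = 2, 10
# Ramp logits 0..9 in each row; boosting the lower half of the second row moves
# its top-3 tokens to indices 2, 3 and 4 instead of 7, 8 and 9.
ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
ramp_logits[1:, : vocab_size // 2] += vocab_size

top_k_warp = FlaxTopKLogitsWarper(3)
scores = top_k_warp(None, ramp_logits, cur_len=None)

# Everything outside the per-row top-3 is filtered to -inf.
print(jnp.isinf(scores[0]).tolist())  # 7 * [True] + 3 * [False]
print(jnp.isinf(scores[1]).tolist())  # 2 * [True] + 3 * [False] + 5 * [True]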
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = jnp.ones((batch_size, length)) / length return scores def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) scores = scores.at[1, 5].set((1 / length) + 0.1) scores = scores.at[1, 10].set((1 / length) - 0.4) probs = jax.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1) warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1) self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = FlaxTopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits, cur_len=None) self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) length = 5 top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None) self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) top_p_warp = FlaxTopPLogitsWarper(0.8) filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None)) EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) ramp_logits[1] = ramp_logits[1] * 100.0 top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None) self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2]) def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, 
eos_token_id=eos_token_id) input_ids = ids_tensor((batch_size, 20), vocab_size=20) cur_len = 5 scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) scores = self._get_uniform_logits(batch_size, vocab_size) cur_len = 15 scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores_before_min_length).any()) def test_forced_bos_token_logits_processor(self): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) input_ids = ids_tensor((batch_size, 1), vocab_size=20) cur_len = 1 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_forced_eos_token_logits_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) input_ids = ids_tensor((batch_size, 4), vocab_size=20) cur_len = 4 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) cur_len = 3 scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len=cur_len) self.assertFalse(jnp.isinf(scores).any()) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores_comp = processor(input_ids, scores_comp, cur_len=cur_len) self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) def test_processor_list_jitted(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 2 bos_token_id = 1 max_length = 15 input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = 
input_ids.copy() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.copy() temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5) top_k_warp = FlaxTopKLogitsWarper(3) top_p_warp = FlaxTopPLogitsWarper(0.8) min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) cur_len = 10 def run_no_processor_list(input_ids, scores, cur_len): scores = temp_dist_warp(input_ids, scores, cur_len=cur_len) scores = top_k_warp(input_ids, scores, cur_len=cur_len) scores = top_p_warp(input_ids, scores, cur_len=cur_len) scores = min_dist_proc(input_ids, scores, cur_len=cur_len) scores = bos_dist_proc(input_ids, scores, cur_len=cur_len) scores = eos_dist_proc(input_ids, scores, cur_len=cur_len) return scores def run_processor_list(input_ids, scores, cur_len): processor = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) scores = processor(input_ids, scores, cur_len=cur_len) return scores jitted_run_no_processor_list = jax.jit(run_no_processor_list) jitted_run_processor_list = jax.jit(run_processor_list) scores = jitted_run_no_processor_list(input_ids, scores, cur_len) scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len) self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3)) self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Assumed parallelism: 8. ids_tensor creates a random int32 tensor of the given shape within the vocab size; random_attention_mask makes sure that at least one token is attended to for each batch. The mixin cuts the inputs to half length and caps the batch size, then generates at most 5 new tokens; as a hack to allow generate for models such as GPT-2, pad_token_id is set to eos_token_id, as is done in generate. The PyTorch cross-test skips the "Flax" prefix at the beginning of the class name to look up the equivalent PyTorch class. The attention-mask tests (greedy, sample, and beam search) pad the attention mask on the left. The integration tests check that typos are quickly detected (the correct argument is do_sample) and that arbitrary arguments that will not be used anywhere are also not accepted.
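The last note above corresponds to the input validation performed by generate on Flax models. A short sketch of those two checks, using the same checkpoints named in the integration test (a checkpoint with Flax weights is required):

from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

input_ids = tokenizer("Hello world", return_tensors="np").input_ids

# A typo in a known argument is caught explicitly: the correct name is do_sample.
try:
    model.generate(input_ids, do_samples=True)
except ValueError as exc:
    print(exc)

# Arbitrary kwargs that nothing in the generation pipeline would consume are rejected too.
try:
    model.generate(input_ids, foo="bar")
except ValueError as exc:
    print(exc)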
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" if is_torch_available(): import torch def ids_tensor(shape, vocab_size, rng=None): if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) attn_mask[:, -1] = 1 return attn_mask @require_flax class FlaxGenerationTesterMixin: model_tester = None all_generative_model_classes = () def _get_input_ids_and_config(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() max_batch_size = 2 sequence_length = inputs["input_ids"].shape[-1] // 2 input_ids = inputs["input_ids"][:max_batch_size, :sequence_length] attention_mask = jnp.ones_like(input_ids) attention_mask = attention_mask[:max_batch_size, :sequence_length] max_length = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: config.pad_token_id = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def test_greedy_generate_pt_fx(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.decoder_start_token_id = 0 for model_class in self.all_generative_model_classes: flax_model = model_class(config) pt_model_class_name = model_class.__name__[4:] pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params) flax_generation_outputs = flax_model.generate(input_ids).sequences pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist()) def test_greedy_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences 
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_num_return_sequences(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = False config.max_length = max_length config.num_beams = 2 config.num_return_sequences = 2 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences) def test_sample_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.do_sample = True config.max_length = max_length config.temperature = 0.8 config.top_k = 10 config.top_p = 0.3 config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_logits_warper(self): config, input_ids, _, max_length = self._get_input_ids_and_config() config.max_length = max_length config.num_beams = 2 config.min_length = 1 config.forced_bos_token_id = 8 config.forced_eos_token_id = 9 for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_greedy_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = False config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = 
jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_sample_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() attention_mask = attention_mask.at[(0, 0)].set(0) config.do_sample = True config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def test_beam_search_generate_attn_mask(self): config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() attention_mask = attention_mask.at[(0, 0)].set(0) config.num_beams = 2 config.max_length = max_length for model_class in self.all_generative_model_classes: model = model_class(config) generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences self.assertEqual(generation_outputs.shape[-1], max_length) jit_generate = jit(model.generate) jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) @require_flax class FlaxGenerationIntegrationTests(unittest.TestCase): def test_validate_generation_inputs(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids with self.assertRaisesRegex(ValueError, "do_samples"): model.generate(input_ids, do_samples=True) with self.assertRaisesRegex(ValueError, "foo"): fake_model_kwargs = {"foo": "bar"} model.generate(input_ids, **fake_model_kwargs)
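For context on how the mixin above is consumed, here is a hypothetical wiring sketch. `DummyFlaxGPT2Tester` and `FlaxGPT2GenerationTest` are illustrative stand-ins for the per-model tester classes in the real test files, not classes that exist in the library; the only contract the mixin relies on is `prepare_config_and_inputs_for_common()` returning a config and an inputs dict with `input_ids`.

```python
# Hypothetical example of plugging a model into FlaxGenerationTesterMixin.
import unittest
import numpy as np
from transformers import FlaxGPT2LMHeadModel, GPT2Config


class DummyFlaxGPT2Tester:
    # minimal stand-in for the per-model tester classes used in real test files
    def prepare_config_and_inputs_for_common(self):
        config = GPT2Config(
            vocab_size=99, n_positions=32, n_embd=8, n_layer=2, n_head=2,
            bos_token_id=0, eos_token_id=1,
        )
        inputs = {"input_ids": np.random.randint(0, 99, size=(2, 8), dtype=np.int32)}
        return config, inputs


class FlaxGPT2GenerationTest(FlaxGenerationTesterMixin, unittest.TestCase):
    all_generative_model_classes = (FlaxGPT2LMHeadModel,)

    def setUp(self):
        self.model_tester = DummyFlaxGPT2Tester()
```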
coding=utf-8. Copyright 2020 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Inline comments from the PyTorch logits-processor tests below:
- check that min length is applied at length 5; check that min length is not applied anymore at length 15
- check that first input is skipped (min new length applying); check that, for skipping, the prompt length is now 5, after which we expect the first 5 tokens to be skipped
- check that min length is applied at length 2; check that min new length is applied at length 6 because it has only 1 new token; check that min new length is applied at length 7 because it has only 2 new tokens; check that min new length is not applied anymore at length 8; check that min new length is not applied anymore at length 15
- tweak scores so they are not uniform anymore (peak, 1st batch; valley, 1st batch); compute softmax; uniform distribution stays uniform; sharp peaks get higher, valleys get lower; smooth peaks get lower, valleys get higher
- give values special values; check that values were correctly changed; check that values not in the encoder ids were not changed
- create ramp distribution; check that correct tokens are filtered; check special cases: uniform dist is not changed (min_tokens overwrites k); 3 tokens are kept => 2 tokens are nullified
- create distribution and take log (inverse to softmax, as taken in TopPLogitsWarper); dist should be filtered to keep min num values so that the sum is >= top_p; exp(-inf) => 0; check edge cases with negative and extreme logits; make ramp_logits more extreme; make sure at least 2 tokens are kept; first batch should keep three tokens, second batch would keep only 1, but due to min_tokens_to_keep=2 keeps 2
- (typical) dist should be filtered to keep min num values so that the sum is >= 0.7; check special cases: uniform dist is not changed; first batch should keep two tokens, second batch would keep only 1, but due to min_tokens_to_keep=2 keeps 2
- (epsilon) dist should be filtered to only keep values with proba >= 0.1; first batch should keep 3 tokens, second batch would keep only 1, but due to min_tokens_to_keep=2 keeps 2
- (eta) dist should be filtered to only keep values with proba >= min(0.0625, sqrt(0.0625) * e^-H(p)); min(0.0625, 0.1320) is the cutoff for the first row and min(0.0625, 0.1644) is for the second, where H is the entropy function and p is the probability vector; first batch should keep 2 tokens, second batch would keep only 1, but due to min_tokens_to_keep=2 keeps 2
- 2-gram would forbid the 2nd and 3rd tokens (1, 2) at the 1st batch and the 1st token (0) at the 2nd batch; 3-gram would forbid no token at the 1st batch and the 1st token (0) at the 2nd batch
- 2-gram would forbid the 1st and 2nd tokens at the 1st beam and the 1st token (0) at the 2nd beam; 3-gram would forbid the 1st token at the 1st beam and no token at the 2nd beam
- batched input, 2-gram: batch 1: beam 1 tokens (1, 2) forbidden, beam 2 tokens (1) forbidden; batch 2: beam 1 tokens (0, 2) forbidden, beam 2 tokens (1) forbidden; 3-gram: batch 1: beam 1 tokens (1) forbidden, beam 2 no tokens forbidden; batch 2: beam 1 tokens (2) forbidden, beam 2 no tokens forbidden
- batch 1: 1st, 2nd and 4th (0, 1, 3) tokens are forbidden; batch 2: 1st, 2nd and 3rd (0, 1, 2) tokens are forbidden; note that the 5th element cannot be forbidden as it is the eos token; check edge case
- biases the same termination twice, to ensure we can handle overlapping terminations (it won't have an effect on the test cases though); scores = 0 to facilitate checks; batch 1: positive-bias tokens (1, 4), negative-bias tokens (0, 3), neutral token 2; batch 2: positive-bias tokens (1, 4), negative-bias tokens (0, 2), neutral token 3
- dummy input_ids and scores; instantiate all dist processors; no processor list vs. with processor list; scores should be equal; input_ids should never be changed
- batch 1: 1st, 2nd (0, 1) tokens are allowed; batch 2: 3rd, 4th (2, 3) tokens are allowed
- batch_idx = 0 -> index = batch_idx * num_beam_groups = 0 * 2 = 0 -> penalises token 1; batch_idx = 1 -> index = batch_idx * num_beam_groups = 1 * 2 = 2 -> penalises token 1
- check that all scores are -inf except the bos_token_id score (the score for bos_token_id should be zero); check that bos_token_id is not forced if the current length is greater than 1
- check that all scores are -inf except the eos_token_id when max_length - 1 is reached (the score for eos_token_id should be zero); check that eos_token_id is not forced if max_length - 1 is not reached
- check that the penalty is not applied before start; check that the penalty is applied after start; check that the penalty increases negative scores (clone scores, as the processor updates them in place)
- explicit unconditional prompt + attention mask; explicit unconditional prompt; all implicit
- some small float less than log(min_eos_p)
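The eta cutoff described above can be reproduced by hand. The sketch below recomputes it for the first test distribution `[0.0, 0.1, 0.8, 0.1]` with `epsilon = 0.0625`; it is illustrative arithmetic, not code asserted anywhere in the tests.

```python
import torch

# first test row below: the dist is log([0.0, 0.1, 0.8, 0.1]), so its softmax
# recovers these probabilities
probs = torch.tensor([0.0, 0.1, 0.8, 0.1])
eps = 0.0625

nonzero = probs[probs > 0]
entropy = -(nonzero * nonzero.log()).sum()                     # H(p) ~= 0.639
cutoff = min(eps, (eps ** 0.5) * torch.exp(-entropy).item())   # min(0.0625, ~0.1320) = 0.0625
kept = probs >= cutoff                                         # [False, True, True, True]
print(cutoff, kept.tolist())
```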
import unittest from typing import List, Union from parameterized import parameterized from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from torch import nn from transformers.generation import ( EncoderNoRepeatNGramLogitsProcessor, EncoderRepetitionPenaltyLogitsProcessor, EpsilonLogitsWarper, EtaLogitsWarper, ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, LogitsProcessorList, MinLengthLogitsProcessor, MinNewTokensLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, SequenceBiasLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, UnbatchedClassifierFreeGuidanceLogitsProcessor, ) from transformers.generation.logits_process import BarkEosPrioritizerLogitsProcessor @require_torch class LogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return scores def test_min_length_dist_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) input_ids = ids_tensor((batch_size, 5), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")]) input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) @parameterized.expand([(0,), ([0, 18],)]) def test_new_min_length_dist_processor(self, eos_token_id: Union[int, List[int]]): vocab_size = 20 batch_size = 4 input_ids = ids_tensor((batch_size, 5), vocab_size=20) new_min_dist_processor = MinNewTokensLengthLogitsProcessor( prompt_length_to_skip=input_ids.shape[-1], min_new_tokens=3, eos_token_id=eos_token_id ) expected_eos_scores_before_min_length = batch_size * [-float("inf")] if isinstance(eos_token_id, list): expected_eos_scores_before_min_length *= len(eos_token_id) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) self.assertTrue(new_min_dist_processor.prompt_length_to_skip == 5) input_ids = ids_tensor((batch_size, 2), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) input_ids = ids_tensor((batch_size, 6), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) input_ids = ids_tensor((batch_size, 7), vocab_size=20) scores = 
self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertListEqual( scores_before_min_length[:, eos_token_id].flatten().tolist(), expected_eos_scores_before_min_length ) input_ids = ids_tensor((batch_size, 8), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) input_ids = ids_tensor((batch_size, 15), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = new_min_dist_processor(input_ids, scores) self.assertFalse(torch.isinf(scores_before_min_length).any()) def test_temperature_dist_warper(self): input_ids = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) scores[1, 5] = (1 / length) + 0.1 scores[1, 10] = (1 / length) - 0.4 probs = nn.functional.softmax(scores, dim=-1) temp_dist_warper_sharper = TemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TemperatureLogitsWarper(temperature=1.3) warped_prob_sharp = nn.functional.softmax(temp_dist_warper_sharper(input_ids, scores.clone()), dim=-1) warped_prob_smooth = nn.functional.softmax(temp_dist_warper_smoother(input_ids, scores.clone()), dim=-1) self.assertTrue(torch.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3)) self.assertTrue(torch.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3)) self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min()) self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min()) def test_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0) scores = rep_penalty_proc(input_ids, scores.clone()) self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) / 2) def test_encoder_repetition_penalty_dist_process(self): input_ids = torch.tensor([[0, 1], [5, 0]], device=torch_device, dtype=torch.long) vocab_size = 10 scores = self._get_uniform_logits(batch_size=2, length=vocab_size) scores[0, 0] = -(1 / vocab_size) scores[1, 5] = 4 / vocab_size rep_penalty_proc = EncoderRepetitionPenaltyLogitsProcessor(penalty=2.0, encoder_input_ids=input_ids) scores = rep_penalty_proc(input_ids, scores.clone()) self.assertAlmostEqual(scores[0, 0].item(), -(1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 1].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 0].item(), (1 / vocab_size) * 2) self.assertAlmostEqual(scores[1, 5].item(), (4 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 2].item(), (1 / vocab_size)) self.assertAlmostEqual(scores[1, 2].item(), (1 / vocab_size)) def test_top_k_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 ramp_logits = ( torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) ) ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = 
TopKLogitsWarper(3) scores = top_k_warp(input_ids, ramp_logits) self.assertListEqual(torch.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(torch.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True]) length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) scores = top_k_warp_safety_check(input_ids, logits) self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) ramp_logits = torch.arange(length, device=torch_device, dtype=torch.float).unsqueeze(0).repeat(batch_size, 1) scores = top_k_warp_safety_check(input_ids, ramp_logits) self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_top_p_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 dist = torch.log( torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float) ) top_p_warp = TopPLogitsWarper(0.8) filtered_dist = torch.exp(top_p_warp(input_ids, dist)) EXPECTED_FILTERED_DIST = torch.tensor( [[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) ramp_logits[1] = ramp_logits[1] * 100.0 top_p_warp = TopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = top_p_warp(input_ids, ramp_logits) self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_typical_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 dist = torch.log( torch.tensor([[0.97, 0.01, 0.01, 0.01], [0.4, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float) ) typical_warp = TypicalLogitsWarper(0.5) filtered_dist = torch.exp(typical_warp(input_ids, dist)) EXPECTED_FILTERED_DIST = torch.tensor( [[0.97, 0.0, 0.0, 0.0], [0.0, 0.2, 0.2, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) typical_warp_safety_check = TypicalLogitsWarper(mass=0.5, filter_value=0.0, min_tokens_to_keep=3) scores = typical_warp_safety_check(input_ids, logits) self.assertListEqual((scores == 0.0).to(torch.long).sum(dim=-1).tolist(), [0, 0]) ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) ramp_logits[1] = ramp_logits[1] * 100.0 typical_warp = TypicalLogitsWarper(0.7, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = typical_warp(input_ids, ramp_logits) self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_epsilon_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 dist = torch.log( torch.tensor( [[0.87, 0.099, 0.001, 0.03], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) ) epsilon_warp = EpsilonLogitsWarper(0.1) filtered_dist = torch.exp(epsilon_warp(input_ids, dist)) EXPECTED_FILTERED_DIST = torch.tensor( [[0.87, 0, 0, 0], [0.4, 0.299, 0.101, 0.2]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - 
(vocab_size // 2) ramp_logits[1] = ramp_logits[1] * 100.0 epsilon_warp = EpsilonLogitsWarper(5e-2, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = epsilon_warp(input_ids, ramp_logits) self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [3, 2]) def test_eta_dist_warper(self): input_ids = None vocab_size = 10 batch_size = 2 dist = torch.log( torch.tensor([[0.0, 0.1, 0.8, 0.1], [0.01, 0.04, 0.9, 0.05]], device=torch_device, dtype=torch.float) ) eta_warp = EtaLogitsWarper(0.0625) filtered_dist = torch.exp(eta_warp(input_ids, dist)) EXPECTED_FILTERED_DIST = torch.tensor( [[0.0, 0.1, 0.8, 0.1], [0.0, 0.0, 0.9, 0.0]], device=torch_device, dtype=torch.float ) self.assertTrue(torch.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) ramp_logits = torch.arange(vocab_size, device=torch_device, dtype=torch.float).unsqueeze(0).repeat( batch_size, 1 ) - (vocab_size // 2) ramp_logits[1] = ramp_logits[1] * 100.0 eta_warp = EtaLogitsWarper(0.1, min_tokens_to_keep=2, filter_value=0.0) filtered_dist = eta_warp(input_ids, ramp_logits) self.assertListEqual((filtered_dist != 0.0).to(torch.long).sum(dim=-1).tolist(), [2, 2]) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 input_ids = torch.tensor([[1, 1, 2, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = NoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = NoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [True, False, False]]) self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, False, False], [True, False, False]] ) def test_encoder_no_repeat_ngram_dist_processor(self): vocab_size = 3 num_beams = 2 batch_size = 1 encoder_input_ids = torch.tensor([1, 2, 1, 1], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [8, 0, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) self.assertListEqual(torch.isinf(filtered_scores_2_gram).tolist(), [[False, True, True], [False, True, False]]) self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False]] ) vocab_size = 3 num_beams = 2 batch_size = 2 encoder_input_ids = torch.tensor([[1, 2, 1, 1], [0, 0, 2, 1]], device=torch_device, dtype=torch.long) input_ids = torch.tensor([[1, 2, 1], [1, 0, 2], [0, 0, 0], [0, 2, 2]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size * num_beams, vocab_size) no_repeat_proc_2_gram = EncoderNoRepeatNGramLogitsProcessor(2, encoder_input_ids=encoder_input_ids) no_repeat_proc_3_gram = EncoderNoRepeatNGramLogitsProcessor(3, encoder_input_ids=encoder_input_ids) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, scores.clone()) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, scores.clone()) self.assertListEqual( torch.isinf(filtered_scores_2_gram).tolist(), [[False, 
True, True], [False, True, False], [True, False, True], [False, True, False]], ) self.assertListEqual( torch.isinf(filtered_scores_3_gram).tolist(), [[False, True, False], [False, False, False], [False, False, True], [False, False, False]], ) def test_no_bad_words_dist_processor(self): vocab_size = 5 batch_size = 2 eos_token_id = 4 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) self.assertListEqual( torch.isinf(filtered_scores).tolist(), [[True, True, False, True, False], [True, True, True, False, False]] ) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[4]], eos_token_id=eos_token_id) filtered_scores = no_bad_words_dist_proc(input_ids, scores.clone()) self.assertTrue(torch.allclose(scores, filtered_scores, atol=1e-3)) def test_bias_dist_processor(self): vocab_size = 5 batch_size = 2 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) positive_bias = {(1,): 100.0, (4,): 100.0} negative_bias = {(1, 0): -100.0, (0, 1, 2): -100.0, (1, 3, 1, 3): -100.0} negative_bias.update({(1, 3, 1, 3, 1, 3): -100.0}) sequence_bias = {**positive_bias, **negative_bias} scores = torch.zeros((batch_size, vocab_size), dtype=torch.float, device=torch_device) bias_dist_proc = SequenceBiasLogitsProcessor(sequence_bias=sequence_bias) filtered_scores = bias_dist_proc(input_ids, scores.clone()) self.assertListEqual( filtered_scores.tolist(), [[-100.0, 100.0, 0.0, -100.0, 100.0], [-100.0, 100.0, -100.0, 0.0, 100.0]] ) def test_processor_list(self): batch_size = 4 sequence_length = 10 vocab_size = 15 eos_token_id = 0 input_ids = ids_tensor((batch_size, sequence_length), vocab_size) input_ids_comp = input_ids.clone() scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = scores.clone() min_dist_proc = MinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = RepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TopKLogitsWarper(3) top_p_warp = TopPLogitsWarper(0.8) no_repeat_proc = NoRepeatNGramLogitsProcessor(2) no_bad_words_dist_proc = NoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) scores = min_dist_proc(input_ids, scores) scores = temp_dist_warp(input_ids, scores) scores = rep_penalty_proc(input_ids, scores) scores = top_k_warp(input_ids, scores) scores = top_p_warp(input_ids, scores) scores = no_repeat_proc(input_ids, scores) scores = no_bad_words_dist_proc(input_ids, scores) processor = LogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, no_repeat_proc, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp) self.assertTrue(torch.allclose(scores, scores_comp, atol=1e-3)) self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist()) def test_prefix_constrained_logits_processor(self): vocab_size = 5 batch_size = 2 input_ids = torch.tensor([[0, 1, 3, 1], [0, 1, 0, 1]], device=torch_device, dtype=torch.long) scores = self._get_uniform_logits(batch_size, vocab_size) def prefix_allowed_tokens_fn(batch_id, inputs_ids): return [[0, 1], [2, 3]][batch_id] prefix_constrained_logits_proc = 
PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, 1) filtered_scores = prefix_constrained_logits_proc(input_ids, scores.clone()) self.assertListEqual( torch.isinf(filtered_scores).tolist(), [[False, False, True, True, True], [True, True, False, False, True]] ) def test_hamming_diversity(self): vocab_size = 4 num_beams = 2 num_beam_groups = 2 scores = self._get_uniform_logits(num_beams, vocab_size) current_tokens = torch.tensor([0, 3, 1, 2], device=torch_device, dtype=torch.long) diversity_logits_processor = HammingDiversityLogitsProcessor( diversity_penalty=1.0, num_beams=num_beams, num_beam_groups=num_beam_groups ) processed_scores = diversity_logits_processor(None, scores, current_tokens, 1) self.assertTrue( torch.allclose( processed_scores[0], torch.tensor([-0.7500, 0.2500, 0.2500, 0.2500], device=torch_device), atol=1e-3 ) ) self.assertTrue( torch.allclose( processed_scores[1], torch.tensor([0.2500, -0.7500, 0.2500, 0.2500], device=torch_device), atol=1e-3 ) ) def test_forced_bos_token_logits_processor(self): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = ForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) input_ids = ids_tensor((batch_size, 1), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertTrue(torch.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0]) input_ids = ids_tensor((batch_size, 4), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertFalse(torch.isinf(scores).any()) def test_forced_eos_token_logits_processor(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = ForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) input_ids = ids_tensor((batch_size, 4), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertTrue(torch.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0]) input_ids = ids_tensor((batch_size, 3), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores) self.assertFalse(torch.isinf(scores).any()) def test_remove_nan_inf_logits_processor(self): scores = torch.tensor( [[0.0, 0.7, 0.8, float("nan")], [0.1, float("inf"), 0.3, float("-inf")]], device=torch_device ) input_ids = ids_tensor((2, 4), vocab_size=20) logits_processor = InfNanRemoveLogitsProcessor() scores = logits_processor(input_ids, scores) self.assertTrue( torch.allclose( scores, torch.tensor( [[0.0, 0.7, 0.8, 0.0], [0.1, torch.finfo(scores.dtype).max, 0.3, torch.finfo(scores.dtype).min]], device=torch_device, ), atol=1e-6, ) ) def test_exponential_decay_length_penalty(self): vocab_size = 20 batch_size = 4 eos_token_id = 0 penalty_start = 5 penalty_factor = 1.1 input_ids = ids_tensor((batch_size, 2), vocab_size=vocab_size) input_ids_seq_length = input_ids.shape[-1] length_decay_processor = ExponentialDecayLengthPenalty( exponential_decay_length_penalty=(penalty_start, penalty_factor), eos_token_id=eos_token_id, input_ids_seq_length=input_ids_seq_length, ) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_start = torch.clone(scores) scores_before_start = length_decay_processor(input_ids, scores_before_start) self.assertListEqual(scores_before_start[:, eos_token_id].tolist(), scores[:, 
eos_token_id].tolist()) input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size) scores = self._get_uniform_logits(batch_size, vocab_size) scores_after_start = torch.clone(scores) scores_after_start = length_decay_processor(input_ids, scores_after_start) self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all()) input_ids = ids_tensor((batch_size, 20), vocab_size=vocab_size) scores = torch.neg(self._get_uniform_logits(batch_size, vocab_size)) scores_after_start = torch.clone(scores) scores_after_start = length_decay_processor(input_ids, scores_after_start) self.assertTrue(torch.gt(scores_after_start[:, eos_token_id], scores[:, eos_token_id]).all()) def test_normalization(self): input_ids = None scores = torch.tensor( [[-23.18, -29.96, -43.54, 47.77], [-33.58, -26.87, -32.96, 22.51]], device=torch_device, dtype=torch.float ) logit_normalization = LogitNormalization() normalized_scores = logit_normalization(input_ids, scores).exp() ones = torch.ones(scores.shape[0], device=torch_device, dtype=torch.float) self.assertTrue(normalized_scores.sum(dim=-1).allclose(ones)) self.assertTrue(normalized_scores.allclose(scores.softmax(dim=-1))) def test_classifier_free_guidance(self): class Namespace(dict): pass logits_uncond = torch.tensor([[[1.0, 0, 1.5]]]) logits_cond = torch.tensor([[[1.0, 1.0, 1.0]]]) def dummy_model(input_ids, attention_mask, use_cache=True, past_key_values=None): out = Namespace() out.logits = logits_uncond out.past_key_values = None return out def lsm(x): return torch.nn.functional.log_softmax(x, dim=-1) input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor( 1.5, dummy_model, input_ids, torch.ones_like(input_ids, dtype=torch.long) ) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model, input_ids) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) input_ids = torch.LongTensor([[0]]) cfg = UnbatchedClassifierFreeGuidanceLogitsProcessor(1.5, dummy_model) out = cfg(input_ids, logits_cond)[0, -1] res = (lsm(logits_uncond) + 1.5 * (lsm(logits_cond) - lsm(logits_uncond)))[0, -1] self.assertAlmostEqual(out[0].item(), res[0].item()) self.assertAlmostEqual(out[1].item(), res[1].item()) self.assertAlmostEqual(out[2].item(), res[2].item()) def test_early_stop_processor(self): input_ids = None eos_token_id = 2 min_eos_p = 0.1 scores = self._get_uniform_logits(2, 4) scores[0][eos_token_id] = -6 esp = BarkEosPrioritizerLogitsProcessor(eos_token_id=eos_token_id, min_eos_p=min_eos_p) actual_scores = esp(input_ids, scores) expected_scores_list = [ scores[0].tolist(), [float("-inf"), float("-inf"), scores[0][0], float("-inf")], ] self.assertListEqual(actual_scores.tolist(), expected_scores_list)
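Outside the unit tests, the same processor classes can be composed and handed to `generate()` through its `logits_processor` argument. The sketch below is a rough usage example, not part of the test file; the tiny checkpoint name is borrowed from the streamer tests further down and is assumed to be reachable.

```python
# Rough usage sketch of composing logits processors with generate().
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import (
    LogitsProcessorList,
    NoRepeatNGramLogitsProcessor,
    TemperatureLogitsWarper,
    TopKLogitsWarper,
)

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

input_ids = tokenizer("Hello", return_tensors="pt").input_ids
processors = LogitsProcessorList(
    [
        NoRepeatNGramLogitsProcessor(2),          # forbid repeating any 2-gram
        TemperatureLogitsWarper(temperature=0.7), # sharpen the distribution
        TopKLogitsWarper(top_k=50),               # keep only the 50 best tokens
    ]
)
out = model.generate(input_ids, max_new_tokens=10, do_sample=True, logits_processor=processors)
print(tokenizer.decode(out[0]))
```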
coding=utf-8. Copyright 2020 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class StoppingCriteriaTestCase(unittest.TestCase): def _get_tensors(self, length): batch_size = 3 vocab_size = 250 input_ids = ids_tensor((batch_size, length), vocab_size) scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length return input_ids, scores def test_list_criteria(self): input_ids, scores = self._get_tensors(5) criteria = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1), ] ) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_length_criteria(self): criteria = MaxLengthCriteria(max_length=10) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) def test_max_new_tokens_criteria(self): criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5) input_ids, scores = self._get_tensors(5) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(9) self.assertFalse(criteria(input_ids, scores)) input_ids, scores = self._get_tensors(10) self.assertTrue(criteria(input_ids, scores)) criteria_list = StoppingCriteriaList([criteria]) self.assertEqual(criteria_list.max_length, 10) def test_max_time_criteria(self): input_ids, scores = self._get_tensors(5) criteria = MaxTimeCriteria(max_time=0.1) self.assertFalse(criteria(input_ids, scores)) criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2) self.assertTrue(criteria(input_ids, scores)) def test_validate_stopping_criteria(self): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10) with self.assertWarns(UserWarning): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11) stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11) self.assertEqual(len(stopping_criteria), 1)
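The criteria exercised above are typically passed to `generate()` via its `stopping_criteria` argument. The snippet below is an illustrative sketch rather than part of the tests; the tiny checkpoint name is reused from the streamer tests below and is an assumption.

```python
# Illustrative sketch: stopping generation after a wall-clock budget.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

input_ids = tokenizer("Hello", return_tensors="pt").input_ids
criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=0.5)])  # stop after ~0.5 s
out = model.generate(input_ids, max_new_tokens=50, stopping_criteria=criteria)
```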
coding=utf-8. Copyright 2023 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Inline comments from the streamer tests below:
- the greedy text should be printed to stdout, except for the final "\n" in the streamer
- tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded; must be tested with actual models, since the dummy models' tokenizers are not aligned with their models and `skip_special_tokens=True` has no effect on them
- the prompt contains a special token, so the streamer should not print it; as such, the output text, when re-tokenized, must only contain one token
- remove the final "\n"
- the streamer will time out after 0.001 seconds, so an exception will be raised
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class StreamerTester(unittest.TestCase): def test_text_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: streamer = TextStreamer(tokenizer) model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer) streamer_text = cs.out[:-1] self.assertEqual(streamer_text, greedy_text) def test_iterator_streamer_matches_non_streaming(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) greedy_text = tokenizer.decode(greedy_ids[0]) streamer = TextIteratorStreamer(tokenizer) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() streamer_text = "" for new_text in streamer: streamer_text += new_text self.assertEqual(streamer_text, greedy_text) def test_text_streamer_skip_prompt(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False) new_greedy_ids = greedy_ids[:, input_ids.shape[1] :] new_greedy_text = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_prompt=True) model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer) streamer_text = cs.out[:-1] self.assertEqual(streamer_text, new_greedy_text) def test_text_streamer_decode_kwargs(self): tokenizer = AutoTokenizer.from_pretrained("distilgpt2") model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id with CaptureStdout() as cs: streamer = TextStreamer(tokenizer, skip_special_tokens=True) model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer) streamer_text = cs.out[:-1] streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt") self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1)) def test_iterator_streamer_timeout(self): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model = 
AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model.config.eos_token_id = -1 input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device) streamer = TextIteratorStreamer(tokenizer, timeout=0.001) generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} thread = Thread(target=model.generate, kwargs=generation_kwargs) thread.start() with self.assertRaises(Empty): streamer_text = "" for new_text in streamer: streamer_text += new_text
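At the user level, the same streamers are wired into `generate()` directly. The sketch below mirrors what the tests above do with `TextStreamer`, printing tokens to stdout as they are produced; the checkpoint name is the same tiny model the tests use and is assumed to be reachable.

```python
# Quick usage sketch: live-printing generated tokens with TextStreamer.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextStreamer(tokenizer, skip_prompt=True)  # don't re-print the prompt
model.generate(**inputs, max_new_tokens=20, streamer=streamer)
```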
coding=utf-8. Copyright 2020 The HuggingFace Team Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Test notes: check that min length is applied at length 5, and is no longer applied at length 15. Tweak the scores so they are not uniform anymore (a peak and a valley in the 1st batch) and compute the softmax; a uniform distribution stays uniform, sharp peaks get higher and valleys get lower, smooth peaks get lower and valleys get higher. Check that values were correctly changed: negative scores for used tokens should increase, others should decrease, and unused tokens should see no change. Create a ramp distribution and check that the correct tokens are filtered; check the special cases: a uniform dist is not changed, and min_tokens overwrites k (3 tokens are kept, 2 tokens are nullified). Create a distribution and take its log (the inverse of softmax, as done in TFTopPLogitsWarper); top_p should have been 0.8 to test the edge case of top_p being exactly equal to the sum of some token probabilities, but due to the numerical instability of softmax in TF this value is chosen as the edge case (top_p of exactly 0.8 passes when use_xla is True and fails when False, see PR 18984). The dist should be filtered to keep the minimum number of values so that their sum is top_p (exp(-inf) = 0). Check edge cases with negative and extreme logits: make ramp_logits more extreme and make sure at least 2 tokens are kept (the first batch should keep three tokens; the second batch would keep only 1 but keeps 2 due to min_tokens_to_keep=2). The 2-gram ban would forbid the 2nd and 3rd tokens (1, 2) in the 1st batch and the 1st token (0) in the 2nd batch; the 3-gram ban would forbid no token in the 1st batch and the 1st token (0) in the 2nd batch. For bad words: in batch 1 the 1st, 2nd and 4th tokens (0, 1, 3) are forbidden; in batch 2 the 1st, 2nd and 3rd tokens (0, 1, 2) are forbidden. Check that all scores are -inf except the bos_token_id score (the score for bos_token_id should be zero), and that bos_token_id is not forced if the current length is greater than 1. Check that all scores are -inf except the eos_token_id score when max_length - 1 is reached (the score for eos_token_id should be zero), and that eos_token_id is not forced if max_length - 1 is not reached. Check that no scores are suppressed if begin_index is not reached, and that scores are suppressed once begin_index is reached. Check that suppress_tokens are suppressed and others are not. Check that if cur_len is contained in force_token_map the logits are the same for all tokens except the one force_token_map points to, and that if cur_len is not contained in force_token_map the logits are not modified. TODO (Joao): reintroduce TFNoRepeatNGramLogitsProcessor in the processor-list test once it is compatible with XLA; for now the test builds dummy input_ids and scores, instantiates all dist processors (no_repeat_proc = TFNoRepeatNGramLogitsProcessor(2); no_repeat_proc = tf.function(no_repeat_proc, jit_compile=True)), applies them once without and once with a processor list (scores = no_repeat_proc(input_ids, scores, cur_len)), removes -inf, and checks that the scores are equal and that input_ids are never changed.
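For reference, the top-p edge case described above can be reproduced outside the test harness. The snippet below is a minimal NumPy sketch of the nucleus-filtering rule (sort, accumulate, and drop everything past the threshold while keeping the token that crosses it); it is not the library implementation, and the function name top_p_filter is made up for illustration.

import numpy as np

def top_p_filter(probs, top_p):
    # Keep the smallest set of tokens whose cumulative probability reaches top_p.
    order = np.argsort(probs)[::-1]          # token indices, highest probability first
    cumulative = np.cumsum(probs[order])
    remove = cumulative > top_p              # tokens past the threshold...
    remove[1:] = remove[:-1].copy()          # ...shifted right so the crossing token is kept
    remove[0] = False
    filtered = probs.copy()
    filtered[order[remove]] = 0.0
    return filtered

print(top_p_filter(np.array([0.3, 0.1, 0.1, 0.5]), 0.79999995))    # [0.3, 0.0, 0.0, 0.5]
print(top_p_filter(np.array([0.15, 0.3, 0.3, 0.25]), 0.79999995))  # [0.0, 0.3, 0.3, 0.25]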
from __future__ import annotations import unittest import numpy as np from parameterized import parameterized from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers.generation import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, TFSuppressTokensAtBeginLogitsProcessor, TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, ) from ..test_modeling_tf_common import ids_tensor @require_tf class TFLogitsProcessorTest(unittest.TestCase): def _get_uniform_logits(self, batch_size: int, length: int): scores = tf.ones((batch_size, length), dtype=tf.float32) / length return scores @parameterized.expand([(False,), (True,)]) def test_min_length_dist_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 min_dist_processor = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) if use_xla: min_dist_processor = tf.function(min_dist_processor, jit_compile=True) cur_len = 5 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertListEqual(scores_before_min_length[:, eos_token_id].numpy().tolist(), 4 * [-float("inf")]) cur_len = 15 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores_before_min_length = min_dist_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(scores_before_min_length)).numpy()) @parameterized.expand([(False,), (True,)]) def test_temperature_dist_warper(self, use_xla): input_ids = None cur_len = None length = 20 scores = self._get_uniform_logits(batch_size=2, length=length) scores = scores.numpy() scores[1, 5] = (1 / length) + 0.1 scores[1, 10] = (1 / length) - 0.4 scores = tf.convert_to_tensor(scores) probs = tf.nn.softmax(scores, axis=-1) temp_dist_warper_sharper = TFTemperatureLogitsWarper(temperature=0.5) temp_dist_warper_smoother = TFTemperatureLogitsWarper(temperature=1.3) if use_xla: temp_dist_warper_sharper = tf.function(temp_dist_warper_sharper, jit_compile=True) temp_dist_warper_smoother = tf.function(temp_dist_warper_smoother, jit_compile=True) warped_prob_sharp = tf.nn.softmax(temp_dist_warper_sharper(input_ids, tf.identity(scores), cur_len), axis=-1) warped_prob_smooth = tf.nn.softmax(temp_dist_warper_smoother(input_ids, tf.identity(scores), cur_len), axis=-1) tf.debugging.assert_near(probs[0, :], warped_prob_sharp[0, :], atol=1e-3) tf.debugging.assert_near(probs[0, :], warped_prob_smooth[0, :], atol=1e-3) self.assertLess(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_sharp[1, :])) self.assertGreater(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_sharp[1, :])) self.assertGreater(tf.math.reduce_max(probs[1, :]), tf.math.reduce_max(warped_prob_smooth[1, :])) self.assertLess(tf.math.reduce_min(probs[1, :]), tf.math.reduce_min(warped_prob_smooth[1, :])) @parameterized.expand([(False,), (True,)]) def test_repetition_penalty_dist_process(self, use_xla): vocab_size = 10 cur_len = 2 input_ids = tf.constant([[0, 1], [5, 0]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) scores = 
self._get_uniform_logits(batch_size=2, length=vocab_size) mask = tf.cast(tf.constant([[1] + 9 * [0], 10 * [0]]), tf.bool) scores = tf.where(mask, -1 / vocab_size, scores) mask = tf.cast(tf.constant([10 * [0], 5 * [0] + [1] + 4 * [0]]), tf.bool) scores = tf.where(mask, 4 / vocab_size, scores) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) if use_xla: rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) scores = rep_penalty_proc(input_ids, tf.identity(scores), cur_len) self.assertAlmostEqual(scores[0, 0].numpy(), -(1 / vocab_size) * 2) self.assertAlmostEqual(scores[0, 1].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) self.assertAlmostEqual(scores[1, 0].numpy(), (1 / vocab_size) / 2) self.assertAlmostEqual(scores[1, 5].numpy(), (4 / vocab_size) / 2) self.assertAlmostEqual(scores[0, 2].numpy(), (1 / vocab_size)) @parameterized.expand([(False,), (True,)]) def test_top_k_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 ramp_logits = np.broadcast_to(np.arange(vocab_size, dtype=np.float32), (batch_size, vocab_size)).copy() ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size top_k_warp = TFTopKLogitsWarper(3) if use_xla: top_k_warp = tf.function(top_k_warp, jit_compile=True) scores = top_k_warp(input_ids, ramp_logits, cur_len) self.assertListEqual(tf.math.is_inf(scores[0]).numpy().tolist(), 7 * [True] + 3 * [False]) self.assertListEqual(tf.math.is_inf(scores[1]).numpy().tolist(), 2 * [True] + 3 * [False] + 5 * [True]) length = 5 logits = self._get_uniform_logits(batch_size=batch_size, length=length) top_k_warp_safety_check = TFTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3) if use_xla: top_k_warp_safety_check = tf.function(top_k_warp_safety_check, jit_compile=True) scores = top_k_warp_safety_check(input_ids, logits, cur_len) self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), axis=-1).numpy().tolist(), [0, 0]) ramp_logits = np.broadcast_to(np.arange(length, dtype=np.float32), (batch_size, length)).copy() scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len) self.assertListEqual(tf.math.reduce_sum(tf.where(scores == 0.0, 1, 0), axis=-1).numpy().tolist(), [2, 2]) @parameterized.expand([(False,), (True,)]) def test_top_p_dist_warper(self, use_xla): input_ids = None cur_len = None vocab_size = 10 batch_size = 2 dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], dtype=np.float32)) top_p_warp = TFTopPLogitsWarper(0.79999995) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = tf.exp(top_p_warp(input_ids, dist, cur_len)) EXPECTED_FILTERED_DIST = tf.constant([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], dtype=tf.float32) tf.debugging.assert_near(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3) ramp_logits = np.broadcast_to( np.arange(vocab_size, dtype=np.float32)[None, :], (batch_size, vocab_size) ).copy() - (vocab_size // 2) ramp_logits[1] = ramp_logits[1] * 100.0 top_p_warp = TFTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len) self.assertListEqual( tf.math.reduce_sum(tf.where(filtered_dist != 0.0, 1, 0), axis=-1).numpy().tolist(), [3, 2] ) def test_no_repeat_ngram_dist_processor(self): vocab_size = 3 batch_size = 2 cur_len = 4 input_ids = tf.constant([[1, 1, 2, 1], [0, 1, 0, 1]], dtype=tf.int32) 
self.assertEqual(cur_len, input_ids.shape[1]) scores = self._get_uniform_logits(batch_size, vocab_size) no_repeat_proc_2_gram = TFNoRepeatNGramLogitsProcessor(2) no_repeat_proc_3_gram = TFNoRepeatNGramLogitsProcessor(3) filtered_scores_2_gram = no_repeat_proc_2_gram(input_ids, tf.identity(scores), cur_len) filtered_scores_3_gram = no_repeat_proc_3_gram(input_ids, tf.identity(scores), cur_len) self.assertListEqual( tf.math.is_inf(filtered_scores_2_gram).numpy().tolist(), [[False, True, True], [True, False, False]] ) self.assertListEqual( tf.math.is_inf(filtered_scores_3_gram).numpy().tolist(), [[False, False, False], [True, False, False]] ) @parameterized.expand([(False,), (True,)]) def test_no_bad_words_dist_processor(self, use_xla): vocab_size = 5 batch_size = 2 eos_token_id = 4 cur_len = 4 input_ids = tf.constant([[0, 1, 3, 1], [0, 1, 0, 1]], dtype=tf.int32) self.assertEqual(cur_len, input_ids.shape[1]) bad_word_tokens = [[1], [4], [1, 0], [0, 1, 2], [1, 3, 1, 3]] scores = self._get_uniform_logits(batch_size, vocab_size) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=bad_word_tokens, eos_token_id=eos_token_id) if use_xla: no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) filtered_scores = no_bad_words_dist_proc(input_ids, tf.identity(scores), cur_len) self.assertListEqual( tf.math.is_inf(filtered_scores).numpy().tolist(), [[True, True, False, True, True], [True, True, True, False, True]], ) @parameterized.expand([(False,), (True,)]) def test_forced_bos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 bos_token_id = 0 logits_processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) cur_len = 1 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, bos_token_id + 1 :]) & (scores[:, bos_token_id + 1 :] < 0)) ) self.assertListEqual(scores[:, bos_token_id].numpy().tolist(), 4 * [0]) cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_forced_eos_token_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 eos_token_id = 0 max_length = 5 logits_processor = TFForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) cur_len = 4 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue( tf.math.reduce_all(tf.math.is_inf(scores[:, eos_token_id + 1 :]) & (scores[:, eos_token_id + 1 :] < 0)) ) self.assertListEqual( scores[:, eos_token_id].numpy().tolist(), 4 * [0] ) cur_len = 3 input_ids = ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_at_begin_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 begin_suppress_tokens = [1, 2, 3] 
begin_index = 5 logits_processor = TFSuppressTokensAtBeginLogitsProcessor( begin_suppress_tokens=begin_suppress_tokens, begin_index=begin_index ) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) cur_len = 4 input_ids = tf.convert_to_tensor([[11, 17, 15, 8], [14, 0, 19, 5], [13, 11, 18, 19], [11, 12, 16, 15]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) cur_len = 5 input_ids = tf.convert_to_tensor([[5, 5, 5, 0, 17], [18, 1, 9, 14, 17], [18, 6, 8, 15, 19], [8, 12, 17, 1, 2]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, begin_suppress_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_suppress_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 suppress_tokens = [1, 3, 5] keep_tokens = [i for i in range(vocab_size) if i not in suppress_tokens] logits_processor = TFSuppressTokensLogitsProcessor(suppress_tokens=suppress_tokens) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) cur_len = 5 input_ids = tf.convert_to_tensor([[0, 10, 19, 6, 3], [17, 4, 8, 17, 2], [7, 1, 11, 6, 15], [5, 8, 13, 16, 0]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, suppress_tokens, axis=1)))) self.assertFalse(tf.math.reduce_any(tf.math.is_inf(tf.gather(scores, keep_tokens, axis=1)))) @parameterized.expand([(False,), (True,)]) def test_force_tokens_logits_processor(self, use_xla): vocab_size = 20 batch_size = 4 force_token_map = {1: 2, 3: 2} logits_processor = TFForceTokensLogitsProcessor(force_token_map=force_token_map) if use_xla: logits_processor = tf.function(logits_processor, jit_compile=True) cur_len = 1 input_ids = tf.convert_to_tensor([[11], [7], [5], [15]]) ids_tensor((batch_size, cur_len), vocab_size=20) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) tf.debugging.assert_near(tf.gather(scores, [force_token_map[cur_len]], axis=1), 0.0) non_forced_inds = [i for i in range(vocab_size) if i != force_token_map[cur_len]] self.assertTrue( tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, [non_forced_inds], axis=1))), ) cur_len = 2 input_ids = tf.convert_to_tensor([[2, 19], [19, 15], [4, 9], [7, 6]]) scores = self._get_uniform_logits(batch_size, vocab_size) scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) @parameterized.expand([(False,), (True,)]) def test_processor_list(self, use_xla): batch_size = 4 cur_len = 10 vocab_size = 15 eos_token_id = 0 input_ids = ids_tensor((batch_size, cur_len), vocab_size) input_ids_comp = tf.identity(input_ids) scores = self._get_uniform_logits(batch_size, vocab_size) scores_comp = tf.identity(scores) min_dist_proc = TFMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id) temp_dist_warp = TFTemperatureLogitsWarper(temperature=0.5) rep_penalty_proc = TFRepetitionPenaltyLogitsProcessor(penalty=2.0) top_k_warp = TFTopKLogitsWarper(3) top_p_warp = TFTopPLogitsWarper(0.8) no_bad_words_dist_proc = TFNoBadWordsLogitsProcessor(bad_words_ids=[[1]], eos_token_id=eos_token_id) if use_xla: min_dist_proc = tf.function(min_dist_proc, jit_compile=True) 
temp_dist_warp = tf.function(temp_dist_warp, jit_compile=True) rep_penalty_proc = tf.function(rep_penalty_proc, jit_compile=True) top_k_warp = tf.function(top_k_warp, jit_compile=True) top_p_warp = tf.function(top_p_warp, jit_compile=True) no_bad_words_dist_proc = tf.function(no_bad_words_dist_proc, jit_compile=True) scores = min_dist_proc(input_ids, scores, cur_len) scores = temp_dist_warp(input_ids, scores, cur_len) scores = rep_penalty_proc(input_ids, scores, cur_len) scores = top_k_warp(input_ids, scores, cur_len) scores = top_p_warp(input_ids, scores, cur_len) scores = no_bad_words_dist_proc(input_ids, scores, cur_len) processor = TFLogitsProcessorList( [ min_dist_proc, temp_dist_warp, rep_penalty_proc, top_k_warp, top_p_warp, no_bad_words_dist_proc, ] ) scores_comp = processor(input_ids, scores_comp, cur_len) scores = tf.where(tf.math.is_inf(scores), -1e9, scores) scores_comp = tf.where(tf.math.is_inf(scores_comp), -1e9, scores_comp) tf.debugging.assert_near(scores, scores_comp, atol=1e-3) self.assertListEqual(input_ids.numpy().tolist(), input_ids_comp.numpy().tolist())
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Test notes: this needs to be the same as num_hidden_layers; special case for the ForPreTraining model.
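The "special case for the ForPreTraining model" refers to the extra labels the pretraining head consumes. The following is a minimal sketch, assuming torch and transformers are installed; the config values are arbitrary small numbers chosen for speed, not the values of any released checkpoint.

import torch
from transformers import AlbertConfig, AlbertForPreTraining

config = AlbertConfig(
    vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=2,
    num_hidden_groups=2, num_attention_heads=6, intermediate_size=37,
)
model = AlbertForPreTraining(config)
input_ids = torch.randint(0, config.vocab_size, (2, 7))
labels = torch.zeros((2, 7), dtype=torch.long)           # masked-LM labels, one per token
sentence_order_label = torch.zeros(2, dtype=torch.long)  # sentence-order label, one per sequence
outputs = model(input_ids, labels=labels, sentence_order_label=sentence_order_label)
print(outputs.prediction_logits.shape)  # torch.Size([2, 7, 99])
print(outputs.sop_logits.shape)         # torch.Size([2, 2])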
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class AlbertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=2, num_hidden_groups=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_hidden_groups = num_hidden_groups self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return AlbertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, ) def create_and_check_model( 
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = AlbertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = AlbertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = AlbertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = AlbertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, 
self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["sentence_order_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = AlbertModelTester(self) self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AlbertModel.from_pretrained(model_name) self.assertIsNotNone(model) 
@require_torch class AlbertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = AlbertModel.from_pretrained("albert-base-v2") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
Copyright 2021 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class FlaxAlbertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = AlbertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxAlbertModelTester(self) @slow def test_model_from_pretrained(self): for 
model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("albert-base-v2") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs) @require_flax class FlaxAlbertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaxAlbertModel.from_pretrained("albert-base-v2") input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = (1, 11, 768) self.assertEqual(output.shape, expected_shape) expected_slice = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
coding=utf-8. Copyright 2020 The HuggingFace Team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Test notes: inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}; sequence_output, pooled_output = model(inputs); special case for the ForPreTraining model.
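As the note above hints, the TF models accept both a Keras-style dict of tensors and plain positional tensors. A minimal sketch with an arbitrary tiny config, assuming tensorflow and transformers are installed:

import tensorflow as tf
from transformers import AlbertConfig, TFAlbertModel

config = AlbertConfig(
    vocab_size=99, embedding_size=16, hidden_size=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
)
model = TFAlbertModel(config)
input_ids = tf.random.uniform((2, 7), maxval=config.vocab_size, dtype=tf.int32)
attention_mask = tf.ones_like(input_ids)
# dict input, as used throughout the tester below
outputs = model({"input_ids": input_ids, "attention_mask": attention_mask})
# positional input works as well
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (2, 7, 32)
print(outputs.pooler_output.shape)      # (2, 32)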
from __future__ import annotations import unittest from transformers import AlbertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING from transformers.models.albert.modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertModel, ) class TFAlbertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.embedding_size = 16 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = AlbertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, embedding_size=self.embedding_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_albert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFAlbertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = 
model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_albert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFAlbertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, self.num_labels)) def create_and_check_albert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFAlbertForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_albert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFAlbertForSequenceClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_albert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFAlbertForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_albert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFAlbertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.num_choices]) def create_and_check_albert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFAlbertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertListEqual(list(result["logits"].shape), [self.batch_size, self.seq_length, self.num_labels]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, 
sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFAlbertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFAlbertModel, TFAlbertForPreTraining, TFAlbertForMaskedLM, TFAlbertForSequenceClassification, TFAlbertForQuestionAnswering, TFAlbertForTokenClassification, TFAlbertForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFAlbertModel, "fill-mask": TFAlbertForMaskedLM, "question-answering": TFAlbertForQuestionAnswering, "text-classification": TFAlbertForSequenceClassification, "token-classification": TFAlbertForTokenClassification, "zero-shot": TFAlbertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["sentence_order_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) return inputs_dict def setUp(self): self.model_tester = TFAlbertModelTester(self) self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_albert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_multiple_choice(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_sequence_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_albert_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFAlbertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFAlbertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFAlbertForPreTraining.from_pretrained("albert-base-v2") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 30000] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [4.595668, 0.74462754, -1.818147], [4.5954347, 0.7454184, -1.8188258], [4.5954905, 0.7448235, -1.8182316], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
coding=utf-8. Copyright 2019 Hugging Face Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Test notes: we have a SentencePiece fixture for testing; test _convert_token_to_id and _convert_id_to_token; the long expected_encoding literal is marked fmt: skip.
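The sequence-builder behaviour exercised below can be summarised in a few lines. A minimal sketch using the same albert-base-v2 checkpoint the integration test targets (requires network access and the sentencepiece package):

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tokenizer.encode("sequence builders", add_special_tokens=False)
with_special = tokenizer.build_inputs_with_special_tokens(ids)
# a single sequence is wrapped as [CLS] ... [SEP]
assert with_special == [tokenizer.cls_token_id] + ids + [tokenizer.sep_token_id]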
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = AlbertTokenizer rust_tokenizer_class = AlbertTokenizerFast test_rust_tokenizer = True test_sentencepiece = True test_sentencepiece_ignore_case = True def setUp(self): super().setUp() tokenizer = AlbertTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def test_convert_token_and_id(self): token = "<pad>" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<pad>") self.assertEqual(vocab_keys[1], "<unk>") self.assertEqual(vocab_keys[-1], "▁eloquent") self.assertEqual(len(vocab_keys), 30_000) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 30_000) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_full_tokenizer(self): tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], ) def test_sequence_builders(self): tokenizer = AlbertTokenizer(SAMPLE_VOCAB) text = tokenizer.encode("sequence builders") text_2 = tokenizer.encode("multi-sequence build") encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ] @slow def test_tokenizer_integration(self): expected_encoding = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
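The tokenizer tests above revolve around a slow/fast parity check. Below is a minimal, hedged sketch of that check outside the test harness; the "spiece.model" path is a hypothetical stand-in for the fixtures/spiece.model SentencePiece file used by the tests, and instantiating the fast tokenizer directly from a SentencePiece file assumes the sentencepiece and protobuf packages are installed.

```python
# Minimal sketch of the slow/fast ALBERT tokenizer parity check automated above.
from transformers import AlbertTokenizer, AlbertTokenizerFast

vocab_file = "spiece.model"  # hypothetical local path to a SentencePiece model
slow_tokenizer = AlbertTokenizer(vocab_file)
fast_tokenizer = AlbertTokenizerFast(vocab_file)

sequence = "I was born in 92000, and this is falsé."

# Token-level and id-level outputs should line up between the two backends.
assert slow_tokenizer.tokenize(sequence) == fast_tokenizer.tokenize(sequence)
assert slow_tokenizer.encode(sequence, add_special_tokens=False) == fast_tokenizer.encode(
    sequence, add_special_tokens=False
)
```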
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch ALIGN model. """
import inspect import os import tempfile import unittest import requests from transformers import AlignConfig, AlignProcessor, AlignTextConfig, AlignVisionConfig from transformers.testing_utils import ( is_flax_available, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AlignModel, AlignTextModel, AlignVisionModel, ) from transformers.models.align.modeling_align import ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image if is_flax_available(): pass class AlignVisionModelTester: def __init__( self, parent, batch_size=12, image_size=32, num_channels=3, kernel_sizes=[3, 3, 5], in_channels=[32, 16, 24], out_channels=[16, 24, 30], hidden_dim=64, strides=[1, 1, 2], num_block_repeats=[1, 1, 2], expand_ratios=[1, 6, 6], is_training=True, hidden_act="gelu", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.hidden_dim = hidden_dim self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.is_training = is_training self.hidden_act = hidden_act def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return AlignVisionConfig( num_channels=self.num_channels, kernel_sizes=self.kernel_sizes, in_channels=self.in_channels, out_channels=self.out_channels, hidden_dim=self.hidden_dim, strides=self.strides, num_block_repeats=self.num_block_repeats, expand_ratios=self.expand_ratios, hidden_act=self.hidden_act, ) def create_and_check_model(self, config, pixel_values): model = AlignVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) patch_size = self.image_size // 4 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, config.hidden_dim, patch_size, patch_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, config.hidden_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class AlignVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (AlignVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = AlignVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=AlignVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="AlignVisionModel does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="AlignVisionModel does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states num_blocks = sum(config.num_block_repeats) * 4 self.assertEqual(len(hidden_states), num_blocks) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): for model_name in ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AlignVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class AlignTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob 
self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = self.get_config() return config, input_ids, token_type_ids, input_mask def get_config(self): return AlignTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, token_type_ids, input_mask): model = AlignTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class AlignTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (AlignTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = AlignTextModelTester(self) self.config_tester = ConfigTester(self, config_class=AlignTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="ALIGN does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="AlignTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AlignTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def 
test_model_from_pretrained(self): for model_name in ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AlignTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class AlignModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = AlignTextModelTester(parent, **text_kwargs) self.vision_model_tester = AlignVisionModelTester(parent, **vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): test_config, input_ids, token_type_ids, input_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, token_type_ids, input_mask, pixel_values def get_config(self): return AlignConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, token_type_ids, attention_mask, pixel_values): model = AlignModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask, token_type_ids) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, input_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_torch class AlignModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AlignModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": AlignModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = AlignModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Start to fail after using torch `cu118`.") def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="AlignModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "temperature": self.assertAlmostEqual( param.data.item(), 1.0, delta=1e-3, 
msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif name == "text_projection.weight": self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = AlignVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = AlignTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): for model_name in ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AlignModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class AlignModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "kakaobrain/align-base" model = AlignModel.from_pretrained(model_name).to(torch_device) processor = AlignProcessor.from_pretrained(model_name) image = prepare_img() texts = 
["a photo of a cat", "a photo of a dog"] inputs = processor(text=texts, images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[9.7093, 3.4679]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch AltCLIP model. """
import inspect import os import tempfile import unittest import numpy as np import requests from transformers import AltCLIPConfig, AltCLIPProcessor, AltCLIPTextConfig, AltCLIPVisionConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn as nn from transformers import AltCLIPModel, AltCLIPTextModel, AltCLIPVisionModel from transformers.models.altclip.modeling_altclip import ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class AltCLIPVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return AltCLIPVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = AltCLIPVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class AltCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = AltCLIPVisionModelTester(self) self.config_tester = ConfigTester( self, 
config_class=AltCLIPVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AltCLIPVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="AltCLIPVisionModel use the same cv backbone with CLIP model.") def test_model_from_pretrained(self): pass class AltCLIPTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, project_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.project_dim = project_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 
config = self.get_config() return config, input_ids, input_mask def get_config(self): return AltCLIPTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, project_dim=self.project_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, pad_token_id=1, ) def create_and_check_model(self, config, input_ids, input_mask): model = AltCLIPTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class AltCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPTextModel,) if is_torch_available() else () fx_compatible = True test_pruning = False test_head_masking = False @unittest.skip("It's broken.") def test_resize_tokens_embeddings(self): super().test_resize_tokens_embeddings() def setUp(self): self.model_tester = AltCLIPTextModelTester(self) self.config_tester = ConfigTester(self, config_class=AltCLIPTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_training(self): pass def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_model_outputs_equivalence(self): pass @unittest.skip(reason="Result of the model is a dict") def test_hidden_states_output(self): pass @unittest.skip(reason="AltCLIP does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="AltCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="AltCLIPTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): for model_name in ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AltCLIPTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class AltCLIPModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = AltCLIPTextModelTester(parent, **text_kwargs) self.vision_model_tester = AltCLIPVisionModelTester(parent, 
**vision_kwargs) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return AltCLIPConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = AltCLIPModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): model(input_ids, pixel_values, attention_mask) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_torch class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AltCLIPModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": AltCLIPModel} if is_torch_available() else {} fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "FeatureExtractionPipelineTests": return True return False def setUp(self): self.model_tester = AltCLIPModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = 
inputs_dict["pixel_values"] traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @slow def test_model_from_pretrained(self): for model_name in ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = AltCLIPModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_torch class AltCLIPModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "BAAI/AltCLIP" model = AltCLIPModel.from_pretrained(model_name).to(torch_device) processor = AltCLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor(text=["一张猫的照片", "一张狗的照片"], images=image, padding=True, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) probs = outputs.logits_per_image.softmax(dim=1) expected_probs = torch.tensor([[9.9942e-01, 5.7805e-04]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=5e-3))
Copyright 2022 HuggingFace Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Tests for the AST feature extractor. floats_list (copied from tests.models.whisper.test_feature_extraction_whisper) creates a random float32 tensor; in prepare_inputs_for_common, when equal_length is False, the prepared inputs increase in size. test_call checks that all calls wrap encode_plus and batch_encode_plus: it creates three inputs of length 800, 1000 and 1200, tests non-batched input, batched input, and that 2-D numpy arrays are batched. _load_datasamples relies on automatic decoding with librispeech, and the integration test compares against hard-coded expected input values (wrapped in fmt: off / fmt: on markers). ASTFeatureExtractionWithoutTorchaudioTest runs exactly the same tests as before, except that it simulates that torchaudio is not available and verifies that audio_utils is used instead of torchaudio.
import itertools import os import random import tempfile import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin global_rng = random.Random() if is_torch_available(): import torch def floats_list(shape, scale=1.0, rng=None, name=None): if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class ASTFeatureExtractionTester(unittest.TestCase): def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.return_attention_mask = return_attention_mask self.do_normalize = do_normalize def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ASTFeatureExtractor def setUp(self): self.feat_extract_tester = ASTFeatureExtractionTester(self) def test_call(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): 
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) @require_torch def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def test_integration(self): EXPECTED_INPUT_VALUES = torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) input_speech = self._load_datasamples(1) feature_extractor = ASTFeatureExtractor() input_values = feature_extractor(input_speech, return_tensors="pt").input_values self.assertEquals(input_values.shape, (1, 1024, 128)) self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4)) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertDictEqual(dict_first, dict_second) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() self.assertEqual(dict_first, dict_second) @require_torch @unittest.mock.patch( "transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer.is_speech_available", lambda: False, ) class ASTFeatureExtractionWithoutTorchaudioTest(ASTFeatureExtractionTest): def test_using_audio_utils(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) self.assertTrue(hasattr(feat_extract, "window")) self.assertTrue(hasattr(feat_extract, "mel_filters")) from transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer import ( is_speech_available, ) self.assertFalse(is_speech_available())
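Outside the test suite, the feature extractor exercised by test_integration above can be used roughly as follows; the dummy librispeech dataset and the expected (1, 1024, 128) output shape come from that test, and the default ASTFeatureExtractor settings are assumed:

from datasets import load_dataset
from transformers import ASTFeatureExtractor

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech = ds.sort("id")[0]["audio"]["array"]  # automatic decoding with librispeech

feature_extractor = ASTFeatureExtractor()
input_values = feature_extractor(speech, return_tensors="pt").input_values
print(input_values.shape)  # torch.Size([1, 1024, 128]) with the default max_length / num_mel_bins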
Copyright 2022 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Testing suite for the PyTorch Audio Spectrogram Transformer (AST) model. In AST, the sequence length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens). This file also overwrites some of the tests of test_modeling_common.py, as AST does not use input_ids, inputs_embeds, attention_mask and seq_length; in test_forward_signature, signature.parameters is an OrderedDict, so the arg_names order is deterministic. TODO: fix the failing pipeline tests when this model gets more usage. The integration test verifies results on some audio from AudioSet: a forward pass followed by a check of the logits.
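With the ASTModelTester defaults in the code below (num_mel_bins=16, patch_size=2, frequency_stride=2, max_length=24, time_stride=2), the sequence-length arithmetic works out like this:

num_mel_bins, patch_size, frequency_stride = 16, 2, 2
max_length, time_stride = 24, 2

frequency_out_dimension = (num_mel_bins - patch_size) // frequency_stride + 1  # (16 - 2) // 2 + 1 = 8
time_out_dimension = (max_length - patch_size) // time_stride + 1              # (24 - 2) // 2 + 1 = 12
num_patches = frequency_out_dimension * time_out_dimension                     # 8 * 12 = 96
seq_length = num_patches + 2                                                   # + [CLS] and distillation tokens = 98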
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class ASTModelTester: def __init__( self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2, ): self.parent = parent self.batch_size = batch_size self.patch_size = patch_size self.max_length = max_length self.num_mel_bins = num_mel_bins self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.frequency_stride = frequency_stride self.time_stride = time_stride frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1 num_patches = frequency_out_dimension * time_out_dimension self.seq_length = num_patches + 2 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, input_values, labels def get_config(self): return ASTConfig( patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, ) def create_and_check_model(self, config, input_values, labels): model = ASTModel(config=config) model.to(torch_device) model.eval() result = model(input_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_values, labels, ) = config_and_inputs inputs_dict = {"input_values": input_values} 
return config, inputs_dict @require_torch class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def setUp(self): self.model_tester = ASTModelTester(self) self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ASTModel.from_pretrained(model_name) self.assertIsNotNone(model) def prepare_audio(): filepath = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset" ) audio, sampling_rate = torchaudio.load(filepath) return audio, sampling_rate @require_torch @require_torchaudio class ASTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593") if is_torchaudio_available() else None ) @slow def test_inference_audio_classification(self): feature_extractor = self.default_feature_extractor model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device) feature_extractor = self.default_feature_extractor audio, sampling_rate = prepare_audio() audio = audio.squeeze().numpy() inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, 527)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
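Since the pipeline_model_mapping above wires ASTForAudioClassification into the audio-classification pipeline, the integration-test checkpoint can also be driven through pipeline(); this is only a sketch and assumes ffmpeg is available to decode the flac file, which is the same one prepare_audio downloads:

from huggingface_hub import hf_hub_download
from transformers import pipeline

filepath = hf_hub_download(
    repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
)
classifier = pipeline("audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593")
print(classifier(filepath)[:3])  # top AudioSet labels with scores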
Copyright 2019-present The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Tests for AutoConfig. The DUMMY_UNKNOWN_IDENTIFIER model name contains both "bert" and "roberta", but RobertaConfig ends up being picked. Registering a wrong model type raises an error, as does trying to register something that already exists in the Transformers library; once the custom config is registered, it can be used as any other config with the Auto API. For dynamic (remote-code) configs: if trust_remote_code is not set, we time out when asking whether to load the model; if remote code is disabled, we can't load this config; with remote code enabled, the test also checks that the config can be reloaded after saving. When a locally registered config conflicts with a remote one: if trust_remote_code is not set, the default is to use the local class; if remote code is disabled, we load the local one; if remote code is enabled, we load from the Hub.
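The registration and save/reload behaviour described above can be sketched as follows; CustomConfig is defined inline here as a stand-in for the test_module.custom_configuration.CustomConfig that the test below imports:

import tempfile

from transformers import AutoConfig, BertConfig
from transformers.models.auto.configuration_auto import CONFIG_MAPPING


class CustomConfig(BertConfig):
    model_type = "custom"


AutoConfig.register("custom", CustomConfig)  # registering an existing type such as "bert" would raise ValueError

config = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = AutoConfig.from_pretrained(tmp_dir)  # resolved back to CustomConfig via the registered mapping

del CONFIG_MAPPING._extra_content["custom"]  # clean up, like the test's finally block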
import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json") class AutoConfigTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_module_spec(self): self.assertIsNotNone(transformers.models.auto.__spec__) self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto")) def test_config_from_model_shortcut(self): config = AutoConfig.from_pretrained("bert-base-uncased") self.assertIsInstance(config, BertConfig) def test_config_model_type_from_local_file(self): config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG) self.assertIsInstance(config, RobertaConfig) def test_config_model_type_from_model_identifier(self): config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER) self.assertIsInstance(config, RobertaConfig) def test_config_for_model_str(self): config = AutoConfig.for_model("roberta") self.assertIsInstance(config, RobertaConfig) def test_pattern_matching_fallback(self): with tempfile.TemporaryDirectory() as tmp_dir: folder = os.path.join(tmp_dir, "fake-roberta") os.makedirs(folder, exist_ok=True) with open(os.path.join(folder, "config.json"), "w") as f: f.write(json.dumps({})) config = AutoConfig.from_pretrained(folder) self.assertEqual(type(config), RobertaConfig) def test_new_config_registration(self): try: AutoConfig.register("custom", CustomConfig) with self.assertRaises(ValueError): AutoConfig.register("model", CustomConfig) with self.assertRaises(ValueError): AutoConfig.register("bert", BertConfig) config = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir) new_config = AutoConfig.from_pretrained(tmp_dir) self.assertIsInstance(new_config, CustomConfig) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = AutoConfig.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_configuration_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.", ): _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo") def test_from_pretrained_dynamic_config(self): with self.assertRaises(ValueError): config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model") with self.assertRaises(ValueError): config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False) config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True) self.assertEqual(config.__class__.__name__, 
"NewModelConfig") with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(tmp_dir) reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig") def test_from_pretrained_dynamic_config_conflict(self): class NewModelConfigLocal(BertConfig): model_type = "new-model" try: AutoConfig.register("new-model", NewModelConfigLocal) config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model") self.assertEqual(config.__class__.__name__, "NewModelConfigLocal") config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False) self.assertEqual(config.__class__.__name__, "NewModelConfigLocal") config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True) self.assertEqual(config.__class__.__name__, "NewModelConfig") finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Tests for AutoFeatureExtractor. test_feature_extractor_from_local_directory_from_config removes feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally, saves in a new folder, and makes sure the private _processor_class variable is not incorrectly saved. For dynamic (remote-code) feature extractors: if trust_remote_code is not set, we time out when asking whether to load the model; if remote code is disabled, we can't load this config; with remote code enabled, the test checks the feature extractor can be reloaded after saving. Trying to register something that already exists in the Transformers library raises an error; once the config is registered, it can be used as any other config with the Auto API. When a locally registered feature extractor conflicts with a remote one: if trust_remote_code is not set, the default is to use the local class; if remote code is disabled, we load the local one; if remote code is enabled, we load from the Hub.
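A quick sketch of the trust_remote_code behaviour described above, using the dynamic feature-extractor repo that the tests below rely on; without opting in, loading fails (or, interactively, times out while asking for confirmation):

from transformers import AutoFeatureExtractor

# The NewFeatureExtractor class lives in the Hub repo, not in transformers, so remote code
# must be trusted explicitly for this call to succeed.
feature_extractor = AutoFeatureExtractor.from_pretrained(
    "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
)
print(type(feature_extractor).__name__)  # "NewFeatureExtractor", as asserted in the test below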
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, Wav2Vec2Config, Wav2Vec2FeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig from test_module.custom_feature_extraction import CustomFeatureExtractor SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json") SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json") class AutoFeatureExtractorTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_feature_extractor_from_model_shortcut(self): config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_feature_extractor_from_local_directory_from_key(self): config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_feature_extractor_from_local_directory_from_config(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = Wav2Vec2Config() config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict() config_dict.pop("feature_extractor_type") config = Wav2Vec2FeatureExtractor(**config_dict) model_config.save_pretrained(tmpdirname) config.save_pretrained(tmpdirname) config = AutoFeatureExtractor.from_pretrained(tmpdirname) dict_as_saved = json.loads(config.to_json_string()) self.assertTrue("_processor_class" not in dict_as_saved) self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_feature_extractor_from_local_file(self): config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG) self.assertIsInstance(config, Wav2Vec2FeatureExtractor) def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = AutoFeatureExtractor.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_feature_extractor_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ): _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model") def test_from_pretrained_dynamic_feature_extractor(self): with self.assertRaises(ValueError): feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) with self.assertRaises(ValueError): feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False ) feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(tmp_dir) reloaded_feature_extractor = 
AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor") def test_new_feature_extractor_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor) with self.assertRaises(ValueError): AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor) feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(tmp_dir) new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir) self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_feature_extractor_conflict(self): class NewFeatureExtractor(Wav2Vec2FeatureExtractor): is_local = True try: AutoConfig.register("custom", CustomConfig) AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor) feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor" ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") self.assertTrue(feature_extractor.is_local) feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") self.assertTrue(feature_extractor.is_local) feature_extractor = AutoFeatureExtractor.from_pretrained( "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True ) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") self.assertTrue(not hasattr(feature_extractor, "is_local")) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
coding: utf-8. Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Comments from the file (noqa: E402 on the two fixture imports): ensure we can load the image processor from the feature extractor config; create a dummy config file with image_processor_type; remove image_processor_type to make sure config.json alone is enough to load the image processor locally; save in a new folder; make sure the private variable is not incorrectly saved; if remote code is not set, we will time out when asking whether to load the model; if remote code is disabled, we can't load this config; test the image processor can be reloaded; trying to register something already existing in the Transformers library will raise an error; now that the config is registered, it can be used like any other config with the auto API; if remote code is not set, the default is to use the local class; if remote code is disabled, we load the local one; if remote code is enabled, we load from the Hub.
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig from test_module.custom_image_processing import CustomImageProcessor class AutoImageProcessorTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_image_processor_from_model_shortcut(self): config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32") self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_key(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_feature_extractor_key(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config = AutoImageProcessor.from_pretrained(tmpdirname) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_directory_from_config(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = CLIPConfig() processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict() config_dict.pop("image_processor_type") config = CLIPImageProcessor(**config_dict) model_config.save_pretrained(tmpdirname) config.save_pretrained(tmpdirname) config = AutoImageProcessor.from_pretrained(tmpdirname) dict_as_saved = json.loads(config.to_json_string()) self.assertTrue("_processor_class" not in dict_as_saved) self.assertIsInstance(config, CLIPImageProcessor) def test_image_processor_from_local_file(self): with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) config = AutoImageProcessor.from_pretrained(processor_tmpfile) self.assertIsInstance(config, CLIPImageProcessor) def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "clip-base is not a local folder and is not a valid model identifier" ): _ = AutoImageProcessor.from_pretrained("clip-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = 
AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_image_processor_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ): _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model") def test_from_pretrained_dynamic_image_processor(self): with self.assertRaises(ValueError): image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor") with self.assertRaises(ValueError): image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False ) image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor") def test_new_image_processor_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoImageProcessor.register(CustomConfig, CustomImageProcessor) with self.assertRaises(ValueError): AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor) with tempfile.TemporaryDirectory() as tmpdirname: processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json" config_tmpfile = Path(tmpdirname) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"}, open(processor_tmpfile, "w"), ) json.dump({"model_type": "clip"}, open(config_tmpfile, "w")) image_processor = CustomImageProcessor.from_pretrained(tmpdirname) with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(tmp_dir) new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir) self.assertIsInstance(new_image_processor, CustomImageProcessor) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_image_processor_conflict(self): class NewImageProcessor(CLIPImageProcessor): is_local = True try: AutoConfig.register("custom", CustomConfig) AutoImageProcessor.register(CustomConfig, NewImageProcessor) image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor") self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(image_processor.is_local) image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(image_processor.is_local) image_processor = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True ) self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor") self.assertTrue(not hasattr(image_processor, "is_local")) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
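The trust_remote_code behaviour covered by the dynamic image processor tests follows a simple pattern. A minimal sketch, assuming network access to the hf-internal-testing repository used above; the try/except only makes explicit which calls are expected to fail.

from transformers import AutoImageProcessor

repo = "hf-internal-testing/test_dynamic_image_processor"

# without trust_remote_code (or with it explicitly disabled), loading custom code fails
for kwargs in ({}, {"trust_remote_code": False}):
    try:
        AutoImageProcessor.from_pretrained(repo, **kwargs)
    except ValueError as err:
        print("refused to load remote code:", err)

# with trust_remote_code=True the class defined in the repo is downloaded and used
image_processor = AutoImageProcessor.from_pretrained(repo, trust_remote_code=True)
print(image_processor.__class__.__name__)  # "NewImageProcessor", per the test above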
coding: utf-8. Copyright 2020 The HuggingFace team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Comments from the file (noqa: E402 on the fixture import): when using a PyTorch checkpoint the expected value is 8; with a safetensors checkpoint, if it is installed, the expected value becomes 7; only one value should not be initialized and be in the missing keys; configs can't be loaded for timm models; we can't pass output_loading_info=True as we're loading from timm; check kwargs are correctly passed to the backbone; check out_features cannot be passed to timm backbones; check kwargs are correctly passed to the backbone; for the auto model mapping, FunnelConfig has two models, FunnelModel and FunnelBaseModel; if remote code is not set, we will time out when asking whether to load the model; if remote code is disabled, we can't load this config; test the model can be reloaded; this one uses a relative import to a util file, which checks it is downloaded and used properly; test the model can be reloaded; test the model can be reloaded; this one uses a relative import to a util file, which checks it is downloaded and used properly; test the model can be reloaded; a wrong config class will raise an error; trying to register something already existing in the Transformers library will raise an error; now that the config is registered, it can be used like any other config with the auto API; the model is a CustomModel but from the new dynamically imported class; if remote code is not set, the default is to use the local class; if remote code is disabled, we load the local one; if remote code is enabled, we load from the Hub; make sure we have cached the model, also with a sharded checkpoint.
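The timm backbone comments above correspond to a small amount of user-facing code. A sketch under the assumption that timm is installed and the resnet18 checkpoint is reachable from it, mirroring test_auto_backbone_timm_model_from_pretrained below.

from transformers import AutoBackbone, TimmBackbone

# use_timm_backbone=True routes the checkpoint through timm; configs can't be loaded for
# timm models, so output_loading_info=True is rejected for this path (a ValueError),
# and out_features cannot be passed either
backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(-1, -2))
assert isinstance(backbone, TimmBackbone)
assert backbone.out_indices == (-1, -2)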
import copy import sys import tempfile import unittest from collections import OrderedDict from pathlib import Path import pytest import transformers from transformers import BertConfig, GPT2Model, is_safetensors_available, is_torch_available from transformers.models.auto.configuration_auto import CONFIG_MAPPING from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_torch, slow, ) from ..bert.test_modeling_bert import BertModelTester sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig if is_torch_available(): import torch from test_module.custom_modeling import CustomModel from transformers import ( AutoBackbone, AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertModel, FunnelBaseModel, FunnelModel, GPT2Config, GPT2LMHeadModel, ResNetBackbone, RobertaForMaskedLM, T5Config, T5ForConditionalGeneration, TapasConfig, TapasForQuestionAnswering, TimmBackbone, ) from transformers.models.auto.modeling_auto import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_MAPPING, ) from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpt2.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tapas import TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST @require_torch class AutoModelTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModel.from_pretrained(model_name) model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertModel) self.assertEqual(len(loading_info["missing_keys"]), 0) EXPECTED_NUM_OF_UNEXPECTED_KEYS = 7 if is_safetensors_available() else 8 self.assertEqual(len(loading_info["unexpected_keys"]), EXPECTED_NUM_OF_UNEXPECTED_KEYS) self.assertEqual(len(loading_info["mismatched_keys"]), 0) self.assertEqual(len(loading_info["error_msgs"]), 0) @slow def test_model_for_pretraining_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForPreTraining.from_pretrained(model_name) model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForPreTraining) for key, value in loading_info.items(): self.assertEqual(len(value), 0) @slow def test_lmhead_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = 
AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelWithLMHead.from_pretrained(model_name) model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_causal_lm(self): for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, GPT2Config) model = AutoModelForCausalLM.from_pretrained(model_name) model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, GPT2LMHeadModel) @slow def test_model_for_masked_lm(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForMaskedLM.from_pretrained(model_name) model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_encoder_decoder_lm(self): for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, T5Config) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, T5ForConditionalGeneration) @slow def test_sequence_classification_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForSequenceClassification.from_pretrained(model_name) model, loading_info = AutoModelForSequenceClassification.from_pretrained( model_name, output_loading_info=True ) self.assertIsNotNone(model) self.assertIsInstance(model, BertForSequenceClassification) @slow def test_question_answering_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForQuestionAnswering.from_pretrained(model_name) model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForQuestionAnswering) @slow def test_table_question_answering_model_from_pretrained(self): for model_name in TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, TapasConfig) model = AutoModelForTableQuestionAnswering.from_pretrained(model_name) model, loading_info = AutoModelForTableQuestionAnswering.from_pretrained( model_name, output_loading_info=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TapasForQuestionAnswering) @slow def test_token_classification_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForTokenClassification.from_pretrained(model_name) model, loading_info 
= AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForTokenClassification) @slow def test_auto_backbone_timm_model_from_pretrained(self): model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True) with pytest.raises(ValueError): AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, TimmBackbone) model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(-1, -2)) self.assertEqual(model.out_indices, (-1, -2)) with self.assertRaises(ValueError): _ = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_features=["stage1"]) @slow def test_auto_backbone_from_pretrained(self): model = AutoBackbone.from_pretrained("microsoft/resnet-18") model, loading_info = AutoBackbone.from_pretrained("microsoft/resnet-18", output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, ResNetBackbone) model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[-1, -2]) self.assertEqual(model.out_indices, [-1, -2]) self.assertEqual(model.out_features, ["stage4", "stage3"]) model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_features=["stage2", "stage4"]) self.assertEqual(model.out_indices, [2, 4]) self.assertEqual(model.out_features, ["stage2", "stage4"]) def test_from_pretrained_identifier(self): model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER) self.assertIsInstance(model, BertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_identifier_from_model_type(self): model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER) self.assertIsInstance(model, RobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_pretrained_with_tuple_values(self): model = AutoModel.from_pretrained("sgugger/funnel-random-tiny") self.assertIsInstance(model, FunnelModel) config = copy.deepcopy(model.config) config.architectures = ["FunnelBaseModel"] model = AutoModel.from_config(config) self.assertIsInstance(model, FunnelBaseModel) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = AutoModel.from_pretrained(tmp_dir) self.assertIsInstance(model, FunnelBaseModel) def test_from_pretrained_dynamic_model_local(self): try: AutoConfig.register("custom", CustomConfig) AutoModel.register(CustomConfig, CustomModel) config = CustomConfig(hidden_size=32) model = CustomModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in MODEL_MAPPING._extra_content: del MODEL_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_model_distant(self): with self.assertRaises(ValueError): model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model") with self.assertRaises(ValueError): model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False) model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True) 
self.assertEqual(model.__class__.__name__, "NewModel") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_model.__class__.__name__, "NewModel") for p1, p2 in zip(model.parameters(), reloaded_model.parameters()): self.assertTrue(torch.equal(p1, p2)) model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model_with_util", trust_remote_code=True) self.assertEqual(model.__class__.__name__, "NewModel") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_model.__class__.__name__, "NewModel") for p1, p2 in zip(model.parameters(), reloaded_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_from_pretrained_dynamic_model_distant_with_ref(self): model = AutoModel.from_pretrained("hf-internal-testing/ref_to_test_dynamic_model", trust_remote_code=True) self.assertEqual(model.__class__.__name__, "NewModel") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_model.__class__.__name__, "NewModel") for p1, p2 in zip(model.parameters(), reloaded_model.parameters()): self.assertTrue(torch.equal(p1, p2)) model = AutoModel.from_pretrained( "hf-internal-testing/ref_to_test_dynamic_model_with_util", trust_remote_code=True ) self.assertEqual(model.__class__.__name__, "NewModel") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) reloaded_model = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True) self.assertEqual(reloaded_model.__class__.__name__, "NewModel") for p1, p2 in zip(model.parameters(), reloaded_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_new_model_registration(self): AutoConfig.register("custom", CustomConfig) auto_classes = [ AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoModelForTokenClassification, ] try: for auto_class in auto_classes: with self.subTest(auto_class.__name__): with self.assertRaises(ValueError): auto_class.register(BertConfig, CustomModel) auto_class.register(CustomConfig, CustomModel) with self.assertRaises(ValueError): auto_class.register(BertConfig, BertModel) tiny_config = BertModelTester(self).get_config() config = CustomConfig(**tiny_config.to_dict()) model = auto_class.from_config(config) self.assertIsInstance(model, CustomModel) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = auto_class.from_pretrained(tmp_dir) self.assertIsInstance(new_model, CustomModel) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] for mapping in ( MODEL_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, ): if CustomConfig in mapping._extra_content: del mapping._extra_content[CustomConfig] def test_from_pretrained_dynamic_model_conflict(self): class NewModelConfigLocal(BertConfig): model_type = "new-model" class NewModel(BertModel): config_class = NewModelConfigLocal try: AutoConfig.register("new-model", NewModelConfigLocal) AutoModel.register(NewModelConfigLocal, NewModel) model = 
AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model") self.assertEqual(model.config.__class__.__name__, "NewModelConfigLocal") model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False) self.assertEqual(model.config.__class__.__name__, "NewModelConfigLocal") model = AutoModel.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True) self.assertEqual(model.config.__class__.__name__, "NewModelConfig") finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] if NewModelConfigLocal in MODEL_MAPPING._extra_content: del MODEL_MAPPING._extra_content[NewModelConfigLocal] def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = AutoModel.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_model_file_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ): _ = AutoModel.from_pretrained("hf-internal-testing/config-no-model") def test_model_from_tf_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_tf=True` to load this model"): _ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") def test_model_from_flax_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_flax=True` to load this model"): _ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") def test_cached_model_has_minimum_calls_to_head(self): _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") with RequestCounter() as counter: _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) def test_attr_not_existing(self): from transformers.models.auto.auto_factory import _LazyAutoMapping _CONFIG_MAPPING_NAMES = OrderedDict([("bert", "BertConfig")]) _MODEL_MAPPING_NAMES = OrderedDict([("bert", "GhostModel")]) _MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES) with pytest.raises(ValueError, match=r"Could not find GhostModel neither in .* nor in .*!"): _MODEL_MAPPING[BertConfig] _MODEL_MAPPING_NAMES = OrderedDict([("bert", "BertModel")]) _MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES) self.assertEqual(_MODEL_MAPPING[BertConfig], BertModel) _MODEL_MAPPING_NAMES = OrderedDict([("bert", "GPT2Model")]) _MODEL_MAPPING = _LazyAutoMapping(_CONFIG_MAPPING_NAMES, _MODEL_MAPPING_NAMES) self.assertEqual(_MODEL_MAPPING[BertConfig], GPT2Model)
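The local round trip in test_from_pretrained_dynamic_model_local can be condensed into a few lines. A sketch under the same assumptions as the test suite, namely that the test_module fixtures live under the repository's utils directory and torch is installed.

import sys
import tempfile
from pathlib import Path

import torch

# make the test_module fixtures importable, as the test file does at the top
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig
from test_module.custom_modeling import CustomModel

from transformers import AutoConfig, AutoModel
from transformers.models.auto.configuration_auto import CONFIG_MAPPING
from transformers.models.auto.modeling_auto import MODEL_MAPPING

AutoConfig.register("custom", CustomConfig)
AutoModel.register(CustomConfig, CustomModel)

model = CustomModel(CustomConfig(hidden_size=32))
with tempfile.TemporaryDirectory() as tmp_dir:
    model.save_pretrained(tmp_dir)
    # the saved config records model_type="custom", so AutoModel resolves back to CustomModel
    reloaded = AutoModel.from_pretrained(tmp_dir, trust_remote_code=True)
    assert all(torch.equal(p1, p2) for p1, p2 in zip(model.parameters(), reloaded.parameters()))

# undo the registration so the global mappings stay clean, as the test's finally block does
del CONFIG_MAPPING._extra_content["custom"]
del MODEL_MAPPING._extra_content[CustomConfig]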
Copyright 2020 The HuggingFace team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class FlaxAutoModelTest(unittest.TestCase): @slow def test_bert_from_pretrained(self): for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(model_name): config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = FlaxAutoModel.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, FlaxBertModel) @slow def test_roberta_from_pretrained(self): for model_name in ["roberta-base", "roberta-large"]: with self.subTest(model_name): config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = FlaxAutoModel.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, FlaxRobertaModel) @slow def test_bert_jax_jit(self): for model_name in ["bert-base-cased", "bert-large-uncased"]: tokenizer = AutoTokenizer.from_pretrained(model_name) model = FlaxBertModel.from_pretrained(model_name) tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX) @jax.jit def eval(**kwargs): return model(**kwargs) eval(**tokens).block_until_ready() @slow def test_roberta_jax_jit(self): for model_name in ["roberta-base", "roberta-large"]: tokenizer = AutoTokenizer.from_pretrained(model_name) model = FlaxRobertaModel.from_pretrained(model_name) tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX) @jax.jit def eval(**kwargs): return model(**kwargs) eval(**tokens).block_until_ready() def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = FlaxAutoModel.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_model_file_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ): _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model") def test_model_from_pt_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"): _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
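The jitted-forward pattern in test_bert_jax_jit is worth calling out on its own. A minimal sketch, assuming flax and jax are installed and bert-base-cased can be downloaded.

import jax

from transformers import AutoTokenizer, FlaxBertModel, TensorType

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxBertModel.from_pretrained("bert-base-cased")
tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

@jax.jit
def forward(**kwargs):
    # the model call is traced once and then runs as a compiled XLA function
    return model(**kwargs)

# block_until_ready forces the asynchronous dispatch to finish before asserting or timing
forward(**tokens).block_until_ready()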
coding: utf-8. Copyright 2020 The HuggingFace team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Comments from the file: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] (this commented-out loop appears twice); for the auto model mapping, FunnelConfig has two models, FunnelModel and FunnelBaseModel; a wrong config class will raise an error; trying to register something already existing in the Transformers library will raise an error; now that the config is registered, it can be used like any other config with the auto API; make sure we have cached the model, also with a sharded checkpoint.
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPT2LMHeadModel, TFRobertaForMaskedLM, TFT5ForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class NewModelConfig(BertConfig): model_type = "new-model" if is_tf_available(): class TFNewModel(TFBertModel): config_class = NewModelConfig @require_tf class TFAutoModelTest(unittest.TestCase): @slow def test_model_from_pretrained(self): model_name = "bert-base-cased" config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModel.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertModel) @slow def test_model_for_pretraining_from_pretrained(self): model_name = "bert-base-cased" config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForPreTraining.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForPreTraining) @slow def test_model_for_causal_lm(self): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, GPT2Config) model = TFAutoModelForCausalLM.from_pretrained(model_name) model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFGPT2LMHeadModel) @slow def test_lmhead_model_from_pretrained(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelWithLMHead.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) @slow def test_model_for_masked_lm(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, 
BertConfig) model = TFAutoModelForMaskedLM.from_pretrained(model_name) model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) @slow def test_model_for_encoder_decoder_lm(self): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, T5Config) model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name) model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFT5ForConditionalGeneration) @slow def test_sequence_classification_model_from_pretrained(self): for model_name in ["bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForSequenceClassification.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForSequenceClassification) @slow def test_question_answering_model_from_pretrained(self): for model_name in ["bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForQuestionAnswering.from_pretrained(model_name) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForQuestionAnswering) @slow @require_tensorflow_probability def test_table_question_answering_model_from_pretrained(self): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, TapasConfig) model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name) model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained( model_name, output_loading_info=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFTapasForQuestionAnswering) def test_from_pretrained_identifier(self): model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER) self.assertIsInstance(model, TFBertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_identifier_from_model_type(self): model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER) self.assertIsInstance(model, TFRobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_pretrained_with_tuple_values(self): model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny") self.assertIsInstance(model, TFFunnelModel) config = copy.deepcopy(model.config) config.architectures = ["FunnelBaseModel"] model = TFAutoModel.from_config(config) model.build() self.assertIsInstance(model, TFFunnelBaseModel) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = TFAutoModel.from_pretrained(tmp_dir) self.assertIsInstance(model, TFFunnelBaseModel) def test_new_model_registration(self): try: AutoConfig.register("new-model", NewModelConfig) auto_classes = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__): with self.assertRaises(ValueError): 
auto_class.register(BertConfig, TFNewModel) auto_class.register(NewModelConfig, TFNewModel) with self.assertRaises(ValueError): auto_class.register(BertConfig, TFBertModel) tiny_config = BertModelTester(self).get_config() config = NewModelConfig(**tiny_config.to_dict()) model = auto_class.from_config(config) model.build() self.assertIsInstance(model, TFNewModel) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = auto_class.from_pretrained(tmp_dir) self.assertIsInstance(new_model, TFNewModel) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = TFAutoModel.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_model_file_not_found(self): with self.assertRaisesRegex( EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ): _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model") def test_model_from_pt_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"): _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") def test_cached_model_has_minimum_calls_to_head(self): _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1)
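The tuple-values test above relies on the auto mapping for Funnel holding two classes. A sketch of how the architectures field steers the choice, assuming the sgugger/funnel-random-tiny checkpoint used by the test is reachable.

import copy

from transformers import TFAutoModel, TFFunnelBaseModel, TFFunnelModel

# the checkpoint's config resolves to the full model by default
model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
assert isinstance(model, TFFunnelModel)

# overriding config.architectures makes from_config pick the base model instead
config = copy.deepcopy(model.config)
config.architectures = ["FunnelBaseModel"]
base_model = TFAutoModel.from_config(config)
assert isinstance(base_model, TFFunnelBaseModel)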
coding: utf-8. Copyright 2020 The HuggingFace team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Comments from the file: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] (this commented-out loop appears four times in the file).
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPT2Config, T5Config, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPT2LMHeadModel, TFRobertaForMaskedLM, TFT5ForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPT2LMHeadModel, RobertaForMaskedLM, T5ForConditionalGeneration, ) @is_pt_tf_cross_test class TFPTAutoModelTest(unittest.TestCase): @slow def test_model_from_pretrained(self): for model_name in ["bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModel.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertModel) model = AutoModel.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertModel) @slow def test_model_for_pretraining_from_pretrained(self): for model_name in ["bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForPreTraining) model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForPreTraining) @slow def test_model_for_causal_lm(self): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, GPT2Config) model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True) model, loading_info = TFAutoModelForCausalLM.from_pretrained( model_name, output_loading_info=True, from_pt=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFGPT2LMHeadModel) model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True) model, loading_info = AutoModelForCausalLM.from_pretrained( model_name, output_loading_info=True, from_tf=True ) self.assertIsNotNone(model) self.assertIsInstance(model, GPT2LMHeadModel) @slow def test_lmhead_model_from_pretrained(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) 
self.assertIsInstance(model, TFBertForMaskedLM) model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_masked_lm(self): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True) model, loading_info = TFAutoModelForMaskedLM.from_pretrained( model_name, output_loading_info=True, from_pt=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForMaskedLM) model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True) model, loading_info = AutoModelForMaskedLM.from_pretrained( model_name, output_loading_info=True, from_tf=True ) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_encoder_decoder_lm(self): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, T5Config) model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True) model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained( model_name, output_loading_info=True, from_pt=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TFT5ForConditionalGeneration) model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True) model, loading_info = AutoModelForSeq2SeqLM.from_pretrained( model_name, output_loading_info=True, from_tf=True ) self.assertIsNotNone(model) self.assertIsInstance(model, T5ForConditionalGeneration) @slow def test_sequence_classification_model_from_pretrained(self): for model_name in ["bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForSequenceClassification) model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForSequenceClassification) @slow def test_question_answering_model_from_pretrained(self): for model_name in ["bert-base-uncased"]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True) self.assertIsNotNone(model) self.assertIsInstance(model, TFBertForQuestionAnswering) model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForQuestionAnswering) def test_from_pretrained_identifier(self): model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True) self.assertIsInstance(model, TFBertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True) self.assertIsInstance(model, BertForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) def test_from_identifier_from_model_type(self): model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True) self.assertIsInstance(model, 
TFRobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410) model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True) self.assertIsInstance(model, RobertaForMaskedLM) self.assertEqual(model.num_parameters(), 14410) self.assertEqual(model.num_parameters(only_trainable=True), 14410)
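The cross-framework loading exercised throughout this class boils down to the from_pt and from_tf flags. A minimal sketch, assuming both torch and tensorflow are installed, using the same small checkpoint as the tests (SMALL_MODEL_IDENTIFIER resolves to a tiny BERT model).

from transformers import AutoModelWithLMHead, TFAutoModelWithLMHead
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER

# load a PyTorch checkpoint into a TensorFlow model
tf_model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)

# and the reverse: load TensorFlow weights into a PyTorch model
pt_model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)

# both directions yield the same parameter count, as asserted in the tests above
assert tf_model.num_parameters() == pt_model.num_parameters() == 14410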
coding: utf-8. Copyright 2021 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Comments from the file (noqa: E402 on the four fixture imports): save in a new folder; copy relevant files; save in a new folder; drop processor_class in the tokenizer config; save in a new folder; drop processor_class in the feature extractor config; copy relevant files; create an empty sample processor; if remote code is not set, we will time out when asking whether to load the model; if remote code is disabled, we can't load this config; test we can also load the slow version; trying to register something already existing in the Transformers library will raise an error; now that the config is registered, it can be used like any other config with the auto API; if remote code is not set, the default is to use the local classes; if remote code is disabled, we load the local ones; if remote code is enabled, we load from the Hub; this has added the proper auto_map field to the feature extractor config; this has added the proper auto_map field to the tokenizer config; the code has been copied from fixtures; can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module.
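The remote-code flow described above also pulls in the custom tokenizer and feature extractor when it is applied to a processor. A short sketch, assuming access to the hf-internal-testing repository used by the dynamic processor tests below.

from transformers import AutoProcessor

repo = "hf-internal-testing/test_dynamic_processor"

# custom processor code on the Hub is only executed when explicitly trusted
processor = AutoProcessor.from_pretrained(repo, trust_remote_code=True)
print(processor.__class__.__name__)                     # "NewProcessor"
print(processor.feature_extractor.__class__.__name__)   # "NewFeatureExtractor"
print(processor.tokenizer.__class__.__name__)           # "NewTokenizerFast" (or "NewTokenizer" without tokenizers installed)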
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig from test_module.custom_feature_extraction import CustomFeatureExtractor from test_module.custom_processing import CustomProcessor from test_module.custom_tokenization import CustomTokenizer SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json") SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json") SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures") class AutoFeatureExtractorTest(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 def test_processor_from_model_shortcut(self): processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsInstance(processor, Wav2Vec2Processor) def test_processor_from_local_directory_from_repo(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = Wav2Vec2Config() processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") model_config.save_pretrained(tmpdirname) processor.save_pretrained(tmpdirname) processor = AutoProcessor.from_pretrained(tmpdirname) self.assertIsInstance(processor, Wav2Vec2Processor) def test_processor_from_local_directory_from_extractor_config(self): with tempfile.TemporaryDirectory() as tmpdirname: copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME)) copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json")) processor = AutoProcessor.from_pretrained(tmpdirname) self.assertIsInstance(processor, Wav2Vec2Processor) def test_processor_from_feat_extr_processor_class(self): with tempfile.TemporaryDirectory() as tmpdirname: feature_extractor = Wav2Vec2FeatureExtractor() tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") processor = Wav2Vec2Processor(feature_extractor, tokenizer) processor.save_pretrained(tmpdirname) with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f: config_dict = json.load(f) config_dict.pop("processor_class") with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f: f.write(json.dumps(config_dict)) processor = AutoProcessor.from_pretrained(tmpdirname) self.assertIsInstance(processor, Wav2Vec2Processor) def test_processor_from_tokenizer_processor_class(self): with tempfile.TemporaryDirectory() as tmpdirname: feature_extractor = Wav2Vec2FeatureExtractor() tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") processor = Wav2Vec2Processor(feature_extractor, tokenizer) processor.save_pretrained(tmpdirname) with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f: config_dict = json.load(f) config_dict.pop("processor_class") with 
open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f: f.write(json.dumps(config_dict)) processor = AutoProcessor.from_pretrained(tmpdirname) self.assertIsInstance(processor, Wav2Vec2Processor) def test_processor_from_local_directory_from_model_config(self): with tempfile.TemporaryDirectory() as tmpdirname: model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor") model_config.save_pretrained(tmpdirname) copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json")) with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f: f.write("{}") processor = AutoProcessor.from_pretrained(tmpdirname) self.assertIsInstance(processor, Wav2Vec2Processor) def test_from_pretrained_dynamic_processor(self): with self.assertRaises(ValueError): processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor") with self.assertRaises(ValueError): processor = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=False ) processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True) self.assertTrue(processor.special_attribute_present) self.assertEqual(processor.__class__.__name__, "NewProcessor") feature_extractor = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present) self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor") tokenizer = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast") new_processor = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False ) new_tokenizer = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present) self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer") else: self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") def test_new_processor_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor) AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer) AutoProcessor.register(CustomConfig, CustomProcessor) with self.assertRaises(ValueError): AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor) feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR) with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) processor = CustomProcessor(feature_extractor, tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(tmp_dir) new_processor = AutoProcessor.from_pretrained(tmp_dir) self.assertIsInstance(new_processor, CustomProcessor) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_processor_conflict(self): class NewFeatureExtractor(Wav2Vec2FeatureExtractor): special_attribute_present = False class 
NewTokenizer(BertTokenizer): special_attribute_present = False class NewProcessor(ProcessorMixin): feature_extractor_class = "AutoFeatureExtractor" tokenizer_class = "AutoTokenizer" special_attribute_present = False try: AutoConfig.register("custom", CustomConfig) AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor) AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer) AutoProcessor.register(CustomConfig, NewProcessor) processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor") self.assertEqual(processor.__class__.__name__, "NewProcessor") self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) processor = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=False ) self.assertEqual(processor.__class__.__name__, "NewProcessor") self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) processor = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor", trust_remote_code=True ) self.assertEqual(processor.__class__.__name__, "NewProcessor") self.assertTrue(processor.special_attribute_present) self.assertTrue(processor.feature_extractor.special_attribute_present) self.assertTrue(processor.tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def test_auto_processor_creates_tokenizer(self): processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(processor.__class__.__name__, "BertTokenizerFast") def test_auto_processor_creates_image_processor(self): processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext") self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor") @is_staging_test class ProcessorPushToHubTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-processor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-processor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-processor") except HTTPError: pass def test_push_to_hub(self): processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(os.path.join(tmp_dir, "test-processor"), push_to_hub=True, token=self._token) new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor") for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_processor.feature_extractor, k)) self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab()) def test_push_to_hub_in_organization(self): processor = 
Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(tmp_dir, "test-processor-org"), push_to_hub=True, token=self._token, organization="valid_org", ) new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org") for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_processor.feature_extractor, k)) self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab()) def test_push_to_hub_dynamic_processor(self): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR) with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) processor = CustomProcessor(feature_extractor, tokenizer) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"{USER}/test-dynamic-processor", token=self._token) repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token) processor.save_pretrained(tmp_dir) self.assertDictEqual( processor.feature_extractor.auto_map, { "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor", "AutoProcessor": "custom_processing.CustomProcessor", }, ) with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f: tokenizer_config = json.load(f) self.assertDictEqual( tokenizer_config["auto_map"], { "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None], "AutoProcessor": "custom_processing.CustomProcessor", }, ) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py"))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py"))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py"))) repo.push_to_hub() new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True) self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
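The registration tests above rely on fixture classes from the dynamic test modules. A minimal sketch of the same Auto-API registration flow, with MyConfig and MyProcessor as hypothetical stand-ins for the test's CustomConfig and CustomProcessor:

from transformers import AutoConfig, AutoProcessor, PretrainedConfig, ProcessorMixin

class MyConfig(PretrainedConfig):  # hypothetical stand-in for CustomConfig
    model_type = "my-custom-model"

class MyProcessor(ProcessorMixin):  # hypothetical stand-in for CustomProcessor
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

# Once registered, the pair resolves through the Auto API like any built-in model type.
AutoConfig.register("my-custom-model", MyConfig)
AutoProcessor.register(MyConfig, MyProcessor)
# Registering a config already covered by the library (e.g. Wav2Vec2Config) raises a
# ValueError, which is what test_new_processor_registration asserts.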
coding: utf-8. Copyright 2020 The HuggingFace team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Comments stripped from the AutoTokenizer test code below: check that tokenizer_type can differ from model_type; tests for https://github.com/huggingface/transformers/pull/13251, covering (1) models whose name remaps, e.g. xlm-roberta -> xlm_roberta, and (2) models that don't remap 1-1 from model name to model file, e.g. openai-gpt -> openai; must find the right class; there is no fast CTRL tokenizer, so this always gives us a slow tokenizer; check that we can load the tokenizer config of an online model (if we ever update the bert-base-cased tokenizer config, this dict will need to be updated); this model does not have a tokenizer_config, so we get back an empty dict; a tokenizer saved with save_pretrained always creates a tokenizer config; check that the class of the tokenizer was properly saved (note that it always saves the slow class); trying to register something that already exists in the Transformers library will raise an error; can register in two steps; can register in one step; we pass through a fast BERT tokenizer because there is no slow-to-fast converter for our new tokenizer and that model does not have a tokenizer.json; if remote code is not set, we will time out when asking whether to load the model; if remote code is disabled, we can't load this config; test that the tokenizer can be reloaded; test that we can also load the slow version; if remote code is not set, the default is to use the local class; if remote code is disabled, we load the local one; if remote code is enabled, we load from the Hub; make sure we have cached the tokenizer.
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPT2Tokenizer, GPT2TokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig from test_module.custom_tokenization import CustomTokenizer if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class AutoTokenizerTest(unittest.TestCase): def setUp(self): transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0 @slow def test_tokenizer_from_pretrained(self): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): tokenizer = AutoTokenizer.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertGreater(len(tokenizer), 0) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): tokenizer = AutoTokenizer.from_pretrained(model_name) self.assertIsNotNone(tokenizer) self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast)) self.assertGreater(len(tokenizer), 0) def test_tokenizer_from_pretrained_identifier(self): tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER) self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertEqual(tokenizer.vocab_size, 12) def test_tokenizer_from_model_type(self): tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER) self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast)) self.assertEqual(tokenizer.vocab_size, 20) def test_tokenizer_from_tokenizer_class(self): config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER) self.assertIsInstance(config, RobertaConfig) tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config) self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast)) self.assertEqual(tokenizer.vocab_size, 12) def test_tokenizer_from_type(self): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt")) tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False) self.assertIsInstance(tokenizer, BertTokenizer) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json")) shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt")) tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False) self.assertIsInstance(tokenizer, GPT2Tokenizer) @require_tokenizers def test_tokenizer_from_type_fast(self): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt")) tokenizer = 
AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert") self.assertIsInstance(tokenizer, BertTokenizerFast) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json")) shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt")) tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2") self.assertIsInstance(tokenizer, GPT2TokenizerFast) def test_tokenizer_from_type_incorrect_name(self): with pytest.raises(ValueError): AutoTokenizer.from_pretrained("./", tokenizer_type="xxx") @require_tokenizers def test_tokenizer_identifier_with_correct_config(self): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased") self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast)) if isinstance(tokenizer, BertTokenizer): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False) else: self.assertEqual(tokenizer.do_lower_case, False) self.assertEqual(tokenizer.model_max_length, 512) @require_tokenizers def test_tokenizer_identifier_non_existent(self): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( EnvironmentError, "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier", ): _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists") def test_model_name_edge_cases_in_mappings(self): tokenizers = TOKENIZER_MAPPING.values() tokenizer_names = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__) for tokenizer_name in tokenizer_names: tokenizer_class_from_name(tokenizer_name) @require_tokenizers def test_from_pretrained_use_fast_toggle(self): self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer) self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast) @require_tokenizers def test_do_lower_case(self): tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False) sample = "Hello, world. How are you?" 
tokens = tokenizer.tokenize(sample) self.assertEqual("[UNK]", tokens[0]) tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False) tokens = tokenizer.tokenize(sample) self.assertEqual("[UNK]", tokens[0]) @require_tokenizers def test_PreTrainedTokenizerFast_from_pretrained(self): tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config") self.assertEqual(type(tokenizer), PreTrainedTokenizerFast) self.assertEqual(tokenizer.model_max_length, 512) self.assertEqual(tokenizer.vocab_size, 30000) self.assertEqual(tokenizer.unk_token, "[UNK]") self.assertEqual(tokenizer.padding_side, "right") self.assertEqual(tokenizer.truncation_side, "right") def test_auto_tokenizer_from_local_folder(self): tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER) self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast)) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir) tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir) self.assertIsInstance(tokenizer2, tokenizer.__class__) self.assertEqual(tokenizer2.vocab_size, 12) def test_auto_tokenizer_fast_no_slow(self): tokenizer = AutoTokenizer.from_pretrained("ctrl") self.assertIsInstance(tokenizer, CTRLTokenizer) def test_get_tokenizer_config(self): config = get_tokenizer_config("bert-base-cased") _ = config.pop("_commit_hash", None) self.assertEqual(config, {"do_lower_case": False}) config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER) self.assertDictEqual(config, {}) tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir) config = get_tokenizer_config(tmp_dir) self.assertEqual(config["tokenizer_class"], "BertTokenizer") def test_new_tokenizer_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer) with self.assertRaises(ValueError): AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer) tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertIsInstance(new_tokenizer, CustomTokenizer) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def test_new_tokenizer_fast_registration(self): try: AutoConfig.register("custom", CustomConfig) AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer) self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None)) AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast) self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast)) del TOKENIZER_MAPPING._extra_content[CustomConfig] AutoTokenizer.register( CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast)) with self.assertRaises(ValueError): AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast) with tempfile.TemporaryDirectory() as tmp_dir: bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER) bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) with 
tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertIsInstance(new_tokenizer, CustomTokenizerFast) new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False) self.assertIsInstance(new_tokenizer, CustomTokenizer) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_tokenizer(self): with self.assertRaises(ValueError): tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer") with self.assertRaises(ValueError): tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True) self.assertTrue(tokenizer.special_attribute_present) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir) reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True) self.assertTrue(reloaded_tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast") self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast") tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False ) self.assertTrue(tokenizer.special_attribute_present) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(tmp_dir) reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False) self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer") self.assertTrue(reloaded_tokenizer.special_attribute_present) else: self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer") @require_tokenizers def test_from_pretrained_dynamic_tokenizer_conflict(self): class NewTokenizer(BertTokenizer): special_attribute_present = False class NewTokenizerFast(BertTokenizerFast): slow_tokenizer_class = NewTokenizer special_attribute_present = False try: AutoConfig.register("custom", CustomConfig) AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer) AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer") self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast") self.assertFalse(tokenizer.special_attribute_present) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") self.assertFalse(tokenizer.special_attribute_present) tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False ) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast") self.assertFalse(tokenizer.special_attribute_present) tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False ) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") self.assertFalse(tokenizer.special_attribute_present) tokenizer = AutoTokenizer.from_pretrained( 
"hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True ) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast") self.assertTrue(tokenizer.special_attribute_present) tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False ) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") self.assertTrue(tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def test_from_pretrained_dynamic_tokenizer_legacy_format(self): tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True ) self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast") tokenizer = AutoTokenizer.from_pretrained( "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False ) self.assertTrue(tokenizer.special_attribute_present) self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") else: self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer") def test_repo_not_found(self): with self.assertRaisesRegex( EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ): _ = AutoTokenizer.from_pretrained("bert-base") def test_revision_not_found(self): with self.assertRaisesRegex( EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") def test_cached_tokenizer_has_minimum_calls_to_head(self): _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1)
coding: utf-8. Copyright 2023 The HuggingFace Inc. team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, without warranties or conditions of any kind, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Testing suite for the PyTorch Autoformer model. Comments stripped from the code below: decoder inputs; the main input is static_categorical_features, not input_ids (the main input is the name of the argument right after self); signature.parameters is an OrderedDict, so the arg_names order is deterministic; check that output_attentions also works when set via the config; past_key_values have been returned; decoder attentions; cross attentions; check that attention is always last and the order is fine.
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin TOLERANCE = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class AutoformerModelTester: def __init__( self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5, ): self.d_model = d_model self.parent = parent self.batch_size = batch_size self.prediction_length = prediction_length self.context_length = context_length self.cardinality = cardinality self.num_time_features = num_time_features self.lags_sequence = lags_sequence self.embedding_dimension = embedding_dimension self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.encoder_seq_length = context_length self.decoder_seq_length = prediction_length + label_length self.label_length = label_length self.moving_average = moving_average self.autocorrelation_factor = autocorrelation_factor def get_config(self): return AutoformerConfig( d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, ) def prepare_autoformer_inputs_dict(self, config): _past_length = config.context_length + max(config.lags_sequence) static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) past_values = floats_tensor([self.batch_size, _past_length]) past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5 future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) future_values = floats_tensor([self.batch_size, config.prediction_length]) inputs_dict = { "past_values": past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, 
"future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def prepare_config_and_inputs(self): config = self.get_config() inputs_dict = self.prepare_autoformer_inputs_dict(config) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = AutoformerModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device) transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict) seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...]) enc_input = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, ) encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) mean = ( torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1) .unsqueeze(1) .repeat(1, config.prediction_length, 1) ) zeros = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, ) dec_input = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1), feature[:, config.context_length - config.label_length :, ...], ), dim=-1, ) trend_init = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1), feature[:, config.context_length - config.label_length :, ...], ), dim=-1, ) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {} test_pruning = False test_head_masking = False test_missing_keys = False test_torchscript = False test_inputs_embeds = False test_model_common_attributes = False def setUp(self): self.model_tester = AutoformerModelTester(self) self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def 
test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) @unittest.skip(reason="Model has no tokens embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass def test_model_main_input_name(self): model_signature = inspect.signature(getattr(AutoformerModel, "forward")) observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask") expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) d_model = getattr(self.model_tester, "d_model", None) num_attention_heads = getattr(self.model_tester, "num_attention_heads", None) dim = d_model // num_attention_heads for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], ) out_len = 
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # check that attentions are returned last and that the output ordering is preserved
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state

        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
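
# Note (not part of the original test file): the integration tests above are decorated with @slow,
# so a default test run skips them. A minimal sketch of how they are typically invoked locally is
# shown below. The test file path is an assumption and may differ in your checkout, and network
# access is required because prepare_batch() downloads a batch from the Hugging Face Hub.
#
#   RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py -k "Integration" -v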