from typing import Callable
import torch
from .registry_utils import create_registry

_norms_description = """The norms registry is used to register classes that implement normalization layers.

    One example of this is torch.nn.LayerNorm. See norm.py for examples.

    Args:
        normalized_shape: Union[int, List[int], torch.Size]: The shape of the input tensor.
        device: Optional[torch.device]: The device to use for the normalization layer.

    Returns:
        torch.nn.Module: The normalization layer.
    """
norms = create_registry('llmfoundry', 'norms', generic_type=type[torch.nn.Module], entry_points=True, description=_norms_description)
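
# Usage sketch (kept commented so importing this module registers nothing extra):
# registering a custom normalization layer. This assumes the catalogue-style
# decorator API exposed by create_registry; the name 'rmsnorm_example' and the
# class below are illustrative only, not part of this module.
#
#   @norms.register('rmsnorm_example')
#   class RMSNormExample(torch.nn.Module):
#       def __init__(self, normalized_shape, device=None):
#           super().__init__()
#           self.weight = torch.nn.Parameter(torch.ones(normalized_shape, device=device))
#
#       def forward(self, x):
#           # Scale by the reciprocal RMS over the last dimension.
#           return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * self.weight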

_fcs_description = """The fcs registry is used to register classes that implement fully connected layers (i.e. torch.nn.Linear).

    See fc.py for examples.

    Args:
        in_features: int: The number of input features.
        out_features: int: The number of output features.
        kwargs: Dict[str, Any]: Additional keyword arguments to pass to the layer.

    Returns:
        torch.nn.Module: The fully connected layer.
    """
fcs = create_registry('llmfoundry', 'fcs', generic_type=type[torch.nn.Module], entry_points=True, description=_fcs_description)
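
# Usage sketch (commented, not executed): fc entries can also be registered by a
# direct call rather than as a decorator, assuming the catalogue-style
# register(name, func=...) form. The entry name is illustrative only.
#
#   fcs.register('torch_linear_example', func=torch.nn.Linear)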

_ffns_description = """The ffns registry is used to register functions that build FFN layers.

    These layers are generally composed of fc layers and activation functions.
    One example is MPTMLP. See ffn.py for examples.

    Args:
        d_model: int: The size of the input and output tensors.
        expansion_ratio: float: The expansion ratio for the hidden layer.
        device: Optional[str]: The device to use for the layer.
        bias: bool: Whether or not to include a bias term.
        kwargs: Dict[str, Any]: Additional keyword arguments to pass to the layer.

    Returns:
        torch.nn.Module: The FFN layer.
    """
ffns = create_registry('llmfoundry', 'ffns', generic_type=Callable, entry_points=True, description=_ffns_description)
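
# Usage sketch (commented, not executed): registering an FFN builder. The entry
# name and layer below are hypothetical; the decorator form assumes the
# catalogue-style API returned by create_registry.
#
#   @ffns.register('gelu_mlp_example')
#   def build_gelu_mlp_example(d_model, expansion_ratio, device=None, bias=True, **kwargs):
#       hidden = int(d_model * expansion_ratio)
#       return torch.nn.Sequential(
#           torch.nn.Linear(d_model, hidden, bias=bias, device=device),
#           torch.nn.GELU(),
#           torch.nn.Linear(hidden, d_model, bias=bias, device=device),
#       )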

_ffns_with_norm_description = """The ffns_with_norm registry is used to register functions that build FFN layers with normalization.

    The resulting layer will have ._has_norm set on it.
    One example is te.LayerNormMLP. See ffn.py for examples.

    Args:
        d_model: int: The size of the input and output tensors.
        expansion_ratio: float: The expansion ratio for the hidden layer.
        device: Optional[str]: The device to use for the layer.
        bias: bool: Whether or not to include a bias term.
        kwargs: Dict[str, Any]: Additional keyword arguments to pass to the layer.

    Returns:
        torch.nn.Module: The FFN layer.
    """
ffns_with_norm = create_registry('llmfoundry', 'ffns_with_norm', generic_type=Callable, entry_points=True, description=_ffns_with_norm_description)
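
# Usage sketch (commented, not executed): an ffns_with_norm builder differs from
# a plain ffns builder only in that the returned module carries the ._has_norm
# marker described above. The builder below is hypothetical.
#
#   @ffns_with_norm.register('norm_mlp_example')
#   def build_norm_mlp_example(d_model, expansion_ratio, device=None, bias=True, **kwargs):
#       hidden = int(d_model * expansion_ratio)
#       layer = torch.nn.Sequential(
#           torch.nn.LayerNorm(d_model, device=device),
#           torch.nn.Linear(d_model, hidden, bias=bias, device=device),
#           torch.nn.GELU(),
#           torch.nn.Linear(hidden, d_model, bias=bias, device=device),
#       )
#       layer._has_norm = True
#       return layer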

_ffns_with_megablocks_description = """The ffns_with_megablocks registry is used to register functions that build FFN layers using MegaBlocks.

    The resulting layer will have ._uses_megablocks set on it.
    One example is megablocks.layers.dmoe.dMoE. See ffn.py for examples.

    Returns:
        torch.nn.Module: The FFN layer.
    """
ffns_with_megablocks = create_registry('llmfoundry', 'ffns_with_megablocks', generic_type=Callable, entry_points=True, description=_ffns_with_megablocks_description)

_attention_classes_description = """The attention_classes registry is used to register classes that implement attention layers.

    The kwargs are passed directly to the constructor of the class.
    One example is GroupedQueryAttention. See attention.py for examples.

    Args:
        kwargs: Dict[str, Any]: Additional keyword arguments to pass to the layer.

    Returns:
        torch.nn.Module: The attention layer.
    """
attention_classes = create_registry('llmfoundry', 'attention_classes', generic_type=type[torch.nn.Module], entry_points=True, description=_attention_classes_description)
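
# Usage sketch (commented, not executed): registering a custom attention class.
# The constructor and forward signatures below are illustrative only and are
# deliberately simplified; real entries should match what attention.py expects.
#
#   @attention_classes.register('mha_example')
#   class MultiheadAttentionExample(torch.nn.Module):
#       def __init__(self, d_model, n_heads, device=None, **kwargs):
#           super().__init__()
#           self.attn = torch.nn.MultiheadAttention(d_model, n_heads, batch_first=True, device=device)
#
#       def forward(self, x, **kwargs):
#           out, _ = self.attn(x, x, x, need_weights=False)
#           return out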

_attention_implementations_description = """The attention_implementations registry is used to register functions that implement the attention operation.

    One example is 'flash'. See attention.py for examples.

    Args:
        query (torch.Tensor): The query tensor.
        key (torch.Tensor): The key tensor.
        value (torch.Tensor): The value tensor.
        n_heads (int): The number of attention heads.
        kv_n_heads (int): The number of attention heads for the key and value tensors.
        past_key_value (Optional[tuple[torch.Tensor, torch.Tensor]]): The past key and value tensors.
        softmax_scale (Optional[float]) = None
        attn_bias (Optional[torch.Tensor]) = None
        is_causal (bool) = False
        dropout_p (float) = 0.0
        training (bool) = True
        needs_weights (bool) = False
        kwargs: Dict[str, Any]: Additional keyword arguments the implementation accepts.

    Returns:
        tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
            The output tensor, the attention weights, and the past key and value tensors.
    """
attention_implementations = create_registry('llmfoundry', 'attention_implementations', generic_type=Callable, entry_points=True, description=_attention_implementations_description)
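
# Usage sketch (commented, not executed): an attention implementation is a plain
# function with the argument list described above. The entry below is
# hypothetical and simply wraps torch's scaled_dot_product_attention; it assumes
# query/key/value already split into heads and PyTorch >= 2.1 for the scale
# argument, which is a simplification of what attention.py implementations do.
#
#   @attention_implementations.register('sdpa_example')
#   def sdpa_example(query, key, value, n_heads, kv_n_heads, past_key_value=None,
#                    softmax_scale=None, attn_bias=None, is_causal=False,
#                    dropout_p=0.0, training=True, needs_weights=False, **kwargs):
#       out = torch.nn.functional.scaled_dot_product_attention(
#           query, key, value, attn_mask=attn_bias,
#           dropout_p=dropout_p if training else 0.0,
#           is_causal=is_causal, scale=softmax_scale)
#       # No attention weights are materialized by this backend.
#       return out, None, past_key_value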

_param_init_fns_description = """The param_init_fns registry is used to register functions that initialize parameters.

    These functions should take in a torch.nn.Module, additional kwargs, and initialize the parameters of the module.
    Generally they can call generic_param_init_fn_ with an appropriate partial function. See param_init_fns.py for examples.

    Note: These functions should take in arbitrary kwargs, and discard any they don't need.

    Args:
        module: torch.nn.Module: The module to initialize.
        kwargs: Dict[str, Any]: Additional keyword arguments to use for initialization.
    """
param_init_fns = create_registry('llmfoundry', 'param_init_fns', generic_type=Callable[..., None], entry_points=True, description=_param_init_fns_description)
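
# Usage sketch (commented, not executed): a param_init_fn receives the module
# plus arbitrary kwargs and must ignore any it does not need. The entry name and
# function below are hypothetical.
#
#   @param_init_fns.register('zeros_example_')
#   def zeros_example_(module, **kwargs):
#       # Zero-initialize only this module's own parameters.
#       for param in module.parameters(recurse=False):
#           torch.nn.init.zeros_(param)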

_module_init_fns_description = """The module_init_fns registry is used to register functions that initialize specific modules.

    These functions should return True if they initialize the module, and False otherwise.
    This allows them to be called without knowing their contents. They should take in the module and additional kwargs.
    If multiple functions can initialize the module, the one that is registered first will be used, so it is recommended to
    override an existing function if you want to change existing initialization behavior, and add new functions if you have new
    layer types. See param_init_fns.py for details.

    Args:
        module: torch.nn.Module: The module to initialize.
        kwargs: Dict[str, Any]: Additional keyword arguments to use for initialization.

    Returns:
        bool: Whether or not the module was initialized.
    """
module_init_fns = create_registry('llmfoundry', 'module_init_fns', generic_type=Callable[..., bool], entry_points=True, description=_module_init_fns_description)
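
# Usage sketch (commented, not executed): a module_init_fn reports via its
# return value whether it handled the module, so unrelated modules fall through
# to the other registered functions. The entry below is hypothetical.
#
#   @module_init_fns.register('embedding_example')
#   def embedding_init_example(module, **kwargs):
#       if isinstance(module, torch.nn.Embedding):
#           torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
#           return True
#       return False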