""" Gemma model configuration""" |

import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

GEMMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/gemma-2b": "https://huggingface.co/google/gemma-2b/resolve/main/config.json",
}


class GemmaConfig(PretrainedConfig): |
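    """
    Configuration class for the Gemma language-model backbone. It stores the
    hyperparameters used to build the model (vocabulary size, hidden and
    intermediate sizes, layer and head counts, dropout rates, rotary-embedding
    settings, and so on) and inherits serialization and special-token handling
    from `PretrainedConfig`.
    """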
|
    model_type = "gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
|
    def __init__(
        self,
        vocab_size=51200,
        hidden_size=2048,
        intermediate_size=8192,
        num_hidden_layers=24,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="gelu_new",
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.5,
        qk_layernorm=False,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # Default to standard multi-head attention when no separate
        # key/value head count is provided.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self.qk_layernorm = qk_layernorm
        self._rope_scaling_validation()

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
|
    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration, e.g. `{"type": "linear", "factor": 2.0}`.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")


class SigLipVisionConfig(PretrainedConfig): |
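    """
    Configuration class for the SigLIP vision tower. It stores the
    vision-transformer hyperparameters: hidden and intermediate sizes, layer
    and attention-head counts, image and patch sizes, activation and
    normalization settings, and the per-channel `image_mean` normalization
    constant.
    """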
|
    model_type = "siglip_vision_model"

    def __init__(
        self,
        hidden_size=1152,
        image_mean=(0.5, 0.5, 0.5),
        intermediate_size=4304,
        num_hidden_layers=27,
        num_attention_heads=16,
        num_channels=3,
        image_size=384,
        patch_size=14,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.image_mean = image_mean
|
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
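        """
        Load a vision configuration from a local path or the Hugging Face Hub.
        When the target checkpoint is a full SigLIP config (`model_type ==
        "siglip"`), only its `vision_config` sub-dictionary is used.
        """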
|
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "siglip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class CeruleGemmaConfig(GemmaConfig): |
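    """
    Configuration for the Cerule language backbone: a `GemmaConfig` with its
    own `model_type` ("cerule-gemma") so Cerule checkpoints can be dispatched
    to the matching model class.
    """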
|
    model_type = "cerule-gemma"

    def __init__(self, **kwargs):
        # Keep a nested GemmaConfig built from the same kwargs, in addition to
        # setting them on this config through the parent initializer.
        self.gemma_config = GemmaConfig(**kwargs)
        super().__init__(**kwargs)
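

# Minimal usage sketch: build both configs with their defaults and inspect a
# few of the resulting fields. Purely illustrative; not tied to any particular
# released checkpoint.
if __name__ == "__main__":
    text_config = CeruleGemmaConfig()
    vision_config = SigLipVisionConfig()

    print(text_config.model_type, text_config.hidden_size, text_config.num_key_value_heads)
    print(vision_config.model_type, vision_config.image_size, vision_config.patch_size)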