
import torch

from ...cache_utils import Cache
from ...modeling_rope_utils import RopeParameters
from ..gemma2.configuration_gemma2 import Gemma2Config
from ..gemma2.modeling_gemma2 import (
    Gemma2Attention,
    Gemma2DecoderLayer,
    Gemma2ForCausalLM,
    Gemma2MLP,
    Gemma2RMSNorm,
)


class VaultGemmaConfig(Gemma2Config):
    r"""
    This is the configuration class to store the configuration of a [`VaultGemmaModel`]. It is used to instantiate a VaultGemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the VaultGemma-7B.
    e.g. [google/vaultgemma-7b](https://huggingface.co/google/vaultgemma-7b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the VaultGemma model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`VaultGemmaModel`]
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            Scaling factor used on the attention scores.
        sliding_window (`int`, *optional*, defaults to 4096):
            In VaultGemma, every other layer uses sliding window attention. This is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention pattern for each layer.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
            Scaling factor when applying tanh softcapping on the attention scores.

    ```python
    >>> from transformers import VaultGemmaModel, VaultGemmaConfig
    >>> # Initializing a VaultGemma vaultgemma-7b style configuration
    >>> configuration = VaultGemmaConfig()
    >>> # Initializing a model from the vaultgemma-7b style configuration
    >>> model = VaultGemmaModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    def __init__(
        self,
        vocab_size: int | None = 256000,
        hidden_size: int | None = 2304,
        intermediate_size: int | None = 9216,
        num_hidden_layers: int | None = 26,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 4,
        head_dim: int | None = 256,
        hidden_activation: str | None = "gelu_pytorch_tanh",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        bos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        query_pre_attn_scalar: float | None = 256,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        final_logit_softcapping: float | None = 30.0,
        attn_logit_softcapping: float | None = 50.0,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            head_dim=head_dim,
            hidden_activation=hidden_activation,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            bos_token_id=bos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            rope_parameters=rope_parameters,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            query_pre_attn_scalar=query_pre_attn_scalar,
            sliding_window=sliding_window,
            layer_types=layer_types,
            final_logit_softcapping=final_logit_softcapping,
            attn_logit_softcapping=attn_logit_softcapping,
            **kwargs,
        )
        del self.use_bidirectional_attention


class VaultGemmaRMSNorm(Gemma2RMSNorm):
    pass


class VaultGemmaMLP(Gemma2MLP):
    pass


class VaultGemmaAttention(Gemma2Attention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: VaultGemmaConfig, layer_idx: int):
        super().__init__()
        self.is_causal = True


class VaultGemmaDecoderLayer(Gemma2DecoderLayer):
    def __init__(self, **super_kwargs):
        super().__init__(**super_kwargs)
        del self.post_attention_layernorm
        del self.post_feedforward_layernorm

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        past_key_values: Cache | None = None,
        cache_position: torch.LongTensor | None = None,
        **kwargs,
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.pre_feedforward_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


class VaultGemmaForCausalLM(Gemma2ForCausalLM):
    pass


__all__ = [
    "VaultGemmaConfig",
    "VaultGemmaForCausalLM",
    "VaultGemmaModel",
    "VaultGemmaPreTrainedModel",
]