
from typing import Any

from ...configuration_utils import PreTrainedConfig, layer_type_validation
from ...modeling_rope_utils import RopeParameters


class T5GemmaModuleConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModuleModel`]. It is used to instantiate a T5GemmaModule
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the T5GemmaModule-7B.
    e.g. [google/t5_gemma_module-7b](https://huggingface.co/google/t5_gemma_module-7b)
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the T5GemmaModule model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`T5GemmaModuleModel`]
        hidden_size (`int`, *optional*, defaults to 2304):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 9216):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 26):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 4):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to
            `num_attention_heads`. A minimal sketch of this mean-pooling conversion follows the argument list below.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie the input and output word embeddings.
        rope_parameters (`RopeParameters`, *optional*):
            Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
            a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
            with longer `max_position_embeddings`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        query_pre_attn_scalar (`float`, *optional*, defaults to 256):
            Scaling factor used on the attention scores.
        sliding_window (`int`, *optional*, defaults to 4096):
            In T5GemmaModule, every other layer uses sliding window attention; this is the size of the sliding window.
        layer_types (`list`, *optional*):
            Attention type for each layer, either `"sliding_attention"` or `"full_attention"`; see the usage example
            below.
        final_logit_softcapping (`float`, *optional*, defaults to 30.0):
            Scaling factor when applying tanh softcapping on the logits.
        attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
            Scaling factor when applying tanh softcapping on the attention scores.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether this module is used as the decoder in an encoder-decoder architecture; it has no effect on
            decoder-only or encoder-only architectures.
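
    As a rough illustration of the mean-pooling conversion mentioned under `num_key_value_heads`, the helper below
    is a minimal sketch with made-up names and shapes (it is not part of this library):

    ```python
    >>> import torch

    >>> def mean_pool_kv_heads(kv_proj_weight, num_heads, num_kv_heads, head_dim):
    ...     # (num_heads * head_dim, hidden_size) -> (num_kv_heads * head_dim, hidden_size)
    ...     hidden_size = kv_proj_weight.shape[1]
    ...     grouped = kv_proj_weight.view(num_kv_heads, num_heads // num_kv_heads, head_dim, hidden_size)
    ...     # Mean-pool every original head that falls into the same key/value group.
    ...     return grouped.mean(dim=1).reshape(num_kv_heads * head_dim, hidden_size)

    >>> weight = torch.randn(8 * 256, 2304)  # e.g. a k_proj weight with 8 heads of dim 256
    >>> mean_pool_kv_heads(weight, num_heads=8, num_kv_heads=4, head_dim=256).shape
    torch.Size([1024, 2304])
    ```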

    ```python
    >>> from transformers import T5GemmaModuleModel, T5GemmaModuleConfig
    >>> # Initializing a T5GemmaModule t5_gemma_module-7b style configuration
    >>> configuration = T5GemmaModuleConfig()
    >>> # Initializing a model from the t5_gemma_module-7b style configuration
    >>> model = T5GemmaModuleModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
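    >>> # Rough sketch of two derived settings, assuming the default arguments above: the layer pattern
    >>> # alternates sliding-window and full attention, and 8 query heads share 4 key/value heads (GQA).
    >>> configuration.layer_types[:2]
    ['sliding_attention', 'full_attention']
    >>> configuration.num_attention_heads // configuration.num_key_value_heads  # query heads per KV head
    2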
    ```"""

    model_type = "t5_gemma_module"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size: int | None = 256000,
        hidden_size: int | None = 2304,
        intermediate_size: int | None = 9216,
        num_hidden_layers: int | None = 26,
        num_attention_heads: int | None = 8,
        num_key_value_heads: int | None = 4,
        head_dim: int | None = 256,
        hidden_activation: str | None = "gelu_pytorch_tanh",
        max_position_embeddings: int | None = 8192,
        initializer_range: float | None = 0.02,
        rms_norm_eps: float | None = 1e-6,
        use_cache: bool | None = True,
        pad_token_id: int | None = 0,
        eos_token_id: int | None = 1,
        bos_token_id: int | None = 2,
        tie_word_embeddings: bool | None = True,
        rope_parameters: RopeParameters | dict[str, RopeParameters] | None = None,
        attention_bias: bool | None = False,
        attention_dropout: float | None = 0.0,
        query_pre_attn_scalar: float | None = 256,
        sliding_window: int | None = 4096,
        layer_types: list[str] | None = None,
        final_logit_softcapping: float | None = 30.0,
        attn_logit_softcapping: float | None = 50.0,
        is_decoder: bool | None = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_parameters = rope_parameters
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types
        self.is_decoder = is_decoder

        if self.layer_types is None:
            # Alternate between sliding-window and full attention, starting with a sliding-window layer.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % 2) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


class T5GemmaConfig(PreTrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`T5GemmaModel`]. It is used to instantiate a T5Gemma
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to a hypothetical balanced Gemma2 encoder-decoder model.
    e.g. [google/t5gemma-2b-2b-prefixlm-it](https://huggingface.co/google/t5gemma-2b-2b-prefixlm-it)
    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModel
    >>> t5gemma_config = T5GemmaConfig.from_pretrained("google/t5gemma-2b-2b-prefixlm-it")
    >>> model = T5GemmaModel(t5gemma_config)
    ```
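
    The `encoder` and `decoder` arguments described below can also be built explicitly from two
    [`T5GemmaModuleConfig`] objects; a minimal sketch with illustrative (non-released) sizes:

    ```python
    >>> from transformers import T5GemmaConfig, T5GemmaModuleConfig

    >>> encoder_config = T5GemmaModuleConfig(num_hidden_layers=2, hidden_size=256, intermediate_size=512)
    >>> decoder_config = T5GemmaModuleConfig(num_hidden_layers=2, hidden_size=256, intermediate_size=512)
    >>> config = T5GemmaConfig(encoder=encoder_config, decoder=decoder_config)
    ```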
    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.
    Args:
        encoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the encoder (see the second example above).
        decoder (`Union[T5GemmaModuleConfig, dict]`, *optional*):
            Configuration for the decoder.
        is_encoder_decoder (`bool`, *optional*, defaults to `True`):
            Whether the model is used as an encoder/decoder or not.
        dropout_rate (`float`, *optional*, defaults to 0.0):
            The ratio for all dropout layers (following T5).
        classifier_dropout_rate (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the classifier (following T5).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for attention.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie input and output embeddings.
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the T5Gemma model (the same as Gemma 2).
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the PreTrainedConfig base class.
    """

    model_type = "t5gemma"
    keys_to_ignore_at_inference = ["past_key_values"]
    sub_configs = {"encoder": T5GemmaModuleConfig, "decoder": T5GemmaModuleConfig}

    def __init__(
        self,
        encoder: T5GemmaModuleConfig | dict[str, Any] | None = None,
        decoder: T5GemmaModuleConfig | dict[str, Any] | None = None,
        is_encoder_decoder: bool | None = True,
        dropout_rate: float | None = 0.0,
        classifier_dropout_rate: float | None = 0.0,
        attention_dropout: float | None = 0.0,
        tie_word_embeddings: bool | None = True,
        vocab_size: int | None = 256000,
        **kwargs,
    ):
        # The encoder and decoder accept either T5GemmaModuleConfig instances or plain dicts.
        if isinstance(encoder, dict):
            encoder = T5GemmaModuleConfig(**encoder)
        elif encoder is None:
            encoder = T5GemmaModuleConfig()
        else:
            assert isinstance(encoder, T5GemmaModuleConfig), f"{type(encoder)} is not supported."

        if isinstance(decoder, dict):
            decoder = T5GemmaModuleConfig(**decoder)
        elif decoder is None:
            decoder = encoder
        else:
            assert isinstance(decoder, T5GemmaModuleConfig), f"{type(decoder)} is not supported."

        # Re-instantiate so the stored sub-configs are independent copies.
        encoder = T5GemmaModuleConfig(**encoder.to_dict())
        decoder = T5GemmaModuleConfig(**decoder.to_dict())

        encoder.is_decoder = False
        encoder.dropout_rate = dropout_rate
        encoder.attention_dropout = attention_dropout
        self.encoder = encoder

        decoder.is_decoder = True
        decoder.use_cache = True
        decoder.dropout_rate = dropout_rate
        decoder.attention_dropout = attention_dropout
        decoder.cross_attention_hidden_size = encoder.hidden_size
        self.decoder = decoder

        # Default the special tokens to the decoder's values unless they are overridden explicitly.
        for special_token_key in ["bos_token_id", "pad_token_id", "eos_token_id"]:
            if special_token_key not in kwargs:
                kwargs[special_token_key] = getattr(decoder, special_token_key)

        super().__init__(**kwargs)

        self.is_encoder_decoder = is_encoder_decoder
        self.vocab_size = vocab_size
        self.dropout_rate = dropout_rate
        self.classifier_dropout_rate = classifier_dropout_rate
        self.attention_dropout = attention_dropout
        self.tie_word_embeddings = tie_word_embeddings


__all__ = ["T5GemmaModuleConfig", "T5GemmaConfig"]