
"""Model-based Pointwise Metric."""

from typing import Union

from vertexai.evaluation import constants
from vertexai.evaluation.metrics import _base
from vertexai.evaluation.metrics import (
    metric_prompt_template as metric_prompt_template_base,
)


class PointwiseMetric(_base._ModelBasedMetric):
    """A Model-based Pointwise Metric.

    A model-based evaluation metric that evaluates a single generative model's
    response.

    For more details on when to use model-based pointwise metrics, see
    [Evaluation methods and metrics](https://cloud.google.com/vertex-ai/generative-ai/docs/models/determine-eval).

    Usage Examples:

        ```
        candidate_model = GenerativeModel("gemini-1.5-pro")
        eval_dataset = pd.DataFrame({
            "prompt"  : [...],
        })
        fluency_metric = PointwiseMetric(
            metric="fluency",
            metric_prompt_template=MetricPromptTemplateExamples.get_prompt_template('fluency'),
        )
        pointwise_eval_task = EvalTask(
            dataset=eval_dataset,
            metrics=[
                fluency_metric,
                MetricPromptTemplateExamples.Pointwise.GROUNDEDNESS,
            ],
        )
        pointwise_result = pointwise_eval_task.evaluate(
            model=candidate_model,
        )
        ```
    """

    def __init__(
        self,
        *,
        metric: str,
        metric_prompt_template: Union[
            metric_prompt_template_base.PointwiseMetricPromptTemplate, str
        ],
    ):
        """Initializes a pointwise evaluation metric.

        Args:
          metric: The pointwise evaluation metric name.
          metric_prompt_template: Pointwise metric prompt template for performing
            the model-based evaluation. A freeform string is also accepted.
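
        Example:
            A minimal sketch of the freeform-string form. The metric name,
            template wording, and the `{prompt}`/`{response}` placeholders
            below are illustrative assumptions (they presume matching columns
            in the evaluation dataset), not values defined by this module:

            ```
            # Illustrative only: template text and placeholder names are
            # assumed and must match columns in the evaluation dataset.
            conciseness_metric = PointwiseMetric(
                metric="conciseness",
                metric_prompt_template=(
                    "Rate the conciseness of the response on a scale of 1 to 5. "
                    "Prompt: {prompt} "
                    "Response: {response}"
                ),
            )
            ```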
        """
        super().__init__(
            metric=metric,
            metric_prompt_template=metric_prompt_template,
        )


class Comet(_base._TranslationMetric):
    """A COMET metric.

    Evaluates a score for the given instance using
    https://huggingface.co/Unbabel/wmt22-comet-da
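
    Usage Example (an illustrative sketch: the `EvalTask`/`pd.DataFrame` usage
    mirrors the `PointwiseMetric` example above, and the dataset column names
    "source", "response", and "reference" are assumptions about the evaluation
    dataset, not defined by this module):

        ```
        eval_dataset = pd.DataFrame({
            "source": [...],     # assumed: source segments
            "response": [...],   # assumed: candidate translations
            "reference": [...],  # assumed: reference translations
        })
        comet_metric = Comet(
            version="COMET_22_SRC_REF",
            source_language="en",
            target_language="de",
        )
        eval_result = EvalTask(
            dataset=eval_dataset,
            metrics=[comet_metric],
        ).evaluate()
        ```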
    """

    _metric_name = constants.Metric.COMET

    def __init__(
        self,
        *,
        version: str = "COMET_22_SRC_REF",
        source_language: str = None,
        target_language: str = None,
    ):
        """Initializes the COMET metric.

        Args:
          version: The COMET version to use for evaluation, e.g.
            "COMET_22_SRC_REF".
          source_language: Optional. The source language of the translation.
          target_language: Optional. The target language of the translation.
        """
        super().__init__(
            name=Comet._metric_name,
            version=version,
            source_language=source_language,
            target_language=target_language,
        )


class MetricX(_base._TranslationMetric):
    """A MetricX metric.

    Evaluates a score for the given instance using
    https://github.com/google-research/metricx
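
    Usage Example (an illustrative sketch; as with `Comet` above, the dataset
    columns the metric reads are an assumption, not defined in this module):

        ```
        metricx_metric = MetricX(
            version="METRICX_24_SRC_REF",
            source_language="en",
            target_language="fr",
        )
        eval_result = EvalTask(
            dataset=eval_dataset,  # assumed to hold source/response/reference columns
            metrics=[metricx_metric],
        ).evaluate()
        ```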
    """

    _metric_name = constants.Metric.METRICX

    def __init__(
        self,
        *,
        version: str = "METRICX_24_SRC_REF",
        source_language: str = None,
        target_language: str = None,
    ):
        """Initializes the MetricX metric.

        Args:
          version: The MetricX version to use for evaluation. Can be one of
            "METRICX_24_SRC_REF", "METRICX_24_SRC", or "METRICX_24_REF".
          source_language: Optional. The source language of the translation.
          target_language: Optional. The target language of the translation.
        """
        super().__init__(
            name=MetricX._metric_name,
            version=version,
            source_language=source_language,
            target_language=target_language,
        )