"""
Common code for all metrics.

"""

from itertools import combinations

import numpy as np

from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target


def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):
    """Average a binary metric for multilabel classification.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

        Will be ignored when ``y_true`` is binary.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.

    Returns
    -------
    score : float or array of shape [n_classes]
        If ``average`` is not ``None``, return the averaged score; otherwise,
        return the score for each class.

    """
    average_options = (None, "micro", "macro", "weighted", "samples")
    if average not in average_options:
        raise ValueError("average has to be one of {0}".format(average_options))

    y_type = type_of_target(y_true)
    if y_type not in ("binary", "multilabel-indicator"):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_type == "binary":
        return binary_metric(y_true, y_score, sample_weight=sample_weight)

    check_consistent_length(y_true, y_score, sample_weight)
    y_true = check_array(y_true)
    y_score = check_array(y_score)

    not_average_axis = 1
    score_weight = sample_weight
    average_weight = None

    if average == "micro":
        if score_weight is not None:
            score_weight = np.repeat(score_weight, y_true.shape[1])
        y_true = y_true.ravel()
        y_score = y_score.ravel()

    elif average == "weighted":
        if score_weight is not None:
            average_weight = np.sum(
                np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0
            )
        else:
            average_weight = np.sum(y_true, axis=0)
        if np.isclose(average_weight.sum(), 0.0):
            return 0

    elif average == "samples":
        # Average over samples instead of labels: swap the roles of the
        # per-score weights and the averaging weights.
        average_weight = score_weight
        score_weight = None
        not_average_axis = 0

    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))

    if y_score.ndim == 1:
        y_score = y_score.reshape((-1, 1))

    n_classes = y_score.shape[not_average_axis]
    score = np.zeros((n_classes,))
    for c in range(n_classes):
        y_true_c = y_true.take([c], axis=not_average_axis).ravel()
        y_score_c = y_score.take([c], axis=not_average_axis).ravel()
        score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)

    # Average the results
    if average is not None:
        if average_weight is not None:
            # Scores with 0 weights are forced to be 0, preventing the average
            # score from being affected by 0-weighted NaN elements.
            average_weight = np.asarray(average_weight)
            score[average_weight == 0] = 0
        return np.average(score, weights=average_weight)
    else:
        return score


def _average_multiclass_ovo_score(binary_metric, y_true, y_score, average="macro"):
    """Average one-versus-one scores for multiclass classification.

    Uses the binary metric for one-vs-one multiclass classification,
    where the score is computed according to the Hand & Till (2001) algorithm.

    Parameters
    ----------
    binary_metric : callable
        The binary metric function to use that accepts the following as input:
            y_true_target : array, shape = [n_samples_target]
                Some sub-array of y_true for a pair of classes designated
                positive and negative in the one-vs-one scheme.
            y_score_target : array, shape = [n_samples_target]
                Scores corresponding to the probability estimates
                of a sample belonging to the designated positive class label

    y_true : array-like of shape (n_samples,)
        True multiclass labels.

    y_score : array-like of shape (n_samples, n_classes)
        Target scores corresponding to probability estimates of a sample
        belonging to a particular class.

    average : {'macro', 'weighted'}, default='macro'
        Determines the type of averaging performed on the pairwise binary
        metric scores:
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account. Classes
            are assumed to be uniformly distributed.
        ``'weighted'``:
            Calculate metrics for each label, taking into account the
            prevalence of the classes.

    Returns
    -------
    score : float
        Average of the pairwise binary metric scores.
    """
    check_consistent_length(y_true, y_score)

    y_true_unique = np.unique(y_true)
    n_classes = y_true_unique.shape[0]
    n_pairs = n_classes * (n_classes - 1) // 2
    pair_scores = np.empty(n_pairs)

    is_weighted = average == "weighted"
    prevalence = np.empty(n_pairs) if is_weighted else None

    # Compute scores treating a as the positive class and b as the negative
    # class, then b as the positive class and a as the negative class.
    for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):
        a_mask = y_true == a
        b_mask = y_true == b
        ab_mask = np.logical_or(a_mask, b_mask)

        if is_weighted:
            prevalence[ix] = np.average(ab_mask)

        a_true = a_mask[ab_mask]
        b_true = b_mask[ab_mask]

        a_true_score = binary_metric(a_true, y_score[ab_mask, a])
        b_true_score = binary_metric(b_true, y_score[ab_mask, b])
        pair_scores[ix] = (a_true_score + b_true_score) / 2

    return np.average(pair_scores, weights=prevalence)
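

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: it shows how the
# two private helpers above are meant to be driven by a binary metric, using a
# toy "mean score of the positive samples" metric.  The `_toy_metric` name and
# the example arrays are assumptions made purely for illustration; exercise it
# with `python -m sklearn.metrics._base` (assuming the package is installed)
# so that the relative imports at the top of the module resolve.
if __name__ == "__main__":

    def _toy_metric(y_true_bin, y_score_bin, sample_weight=None):
        # Mean score assigned to the truly positive samples; a stand-in for a
        # real binary metric such as roc_auc_score.
        y_true_bin = np.asarray(y_true_bin, dtype=bool)
        return float(np.mean(np.asarray(y_score_bin)[y_true_bin]))

    # Multilabel-indicator case: one column per label, scored per label and
    # then macro-averaged by _average_binary_score.
    y_true_ml = np.array([[1, 0], [0, 1], [1, 1]])
    y_score_ml = np.array([[0.9, 0.2], [0.1, 0.8], [0.7, 0.6]])
    print(
        "macro multilabel:",
        _average_binary_score(_toy_metric, y_true_ml, y_score_ml, average="macro"),
    )

    # Multiclass case: labels must be encoded as 0..n_classes-1 so they index
    # the columns of y_score, as in the Hand & Till (2001) one-vs-one scheme.
    y_true_mc = np.array([0, 1, 2, 1, 0])
    y_score_mc = np.array(
        [
            [0.7, 0.2, 0.1],
            [0.2, 0.6, 0.2],
            [0.1, 0.2, 0.7],
            [0.3, 0.5, 0.2],
            [0.8, 0.1, 0.1],
        ]
    )
    print(
        "ovo macro:",
        _average_multiclass_ovo_score(_toy_metric, y_true_mc, y_score_mc),
    )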