"""GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""

import operator
import sys
import time
import warnings
from numbers import Integral, Real

import numpy as np
from scipy import linalg

from ..base import _fit_context
from ..exceptions import ConvergenceWarning
from ..linear_model import _cd_fast as cd_fast
from ..linear_model import lars_path_gram
from ..model_selection import check_cv, cross_val_score
from ..utils import Bunch
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.metadata_routing import (
    MetadataRouter,
    MethodMapping,
    _raise_for_params,
    _routing_enabled,
    process_routing,
)
from ..utils.parallel import Parallel, delayed
from ..utils.validation import (
    _is_arraylike_not_scalar,
    check_random_state,
    check_scalar,
    validate_data,
)
from . import EmpiricalCovariance, empirical_covariance, log_likelihood


def _objective(mle, precision_, alpha):
    """Evaluation of the graphical-lasso objective function

    the objective function is made of a shifted scaled version of the
    normalized log-likelihood (i.e. its empirical mean over the samples) and a
    penalisation term to promote sparsity
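
    Examples
    --------
    A minimal hand-checkable sketch (this is a private helper, not public
    API): only the off-diagonal entries of the precision matrix are
    penalised, so changing ``alpha`` shifts the objective by ``alpha`` times
    the off-diagonal l1 norm.

    >>> import numpy as np
    >>> P = np.array([[1.0, 0.2], [0.2, 1.0]])
    >>> base = _objective(np.eye(2), P, alpha=0.0)
    >>> pen = _objective(np.eye(2), P, alpha=0.5)
    >>> float(np.round(pen - base, 10))
    0.2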
    """
    p = precision_.shape[0]
    cost = -2.0 * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
    cost += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
    return cost


def _dual_gap(emp_cov, precision_, alpha):
    """Expression of the dual gap convergence criterion

    The specific definition is given in Duchi "Projected Subgradient Methods
    for Learning Sparse Gaussians".
    """
    gap = np.sum(emp_cov * precision_)
    gap -= precision_.shape[0]
    gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
    return gap
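

# The solver below implements the GLasso block-coordinate descent (Friedman
# et al., 2008, Biostatistics): each sweep updates one row/column of the
# covariance estimate by solving an l1-penalized regression of that variable
# against the others, expressed on the Gram matrix of the remaining block
# (either by coordinate descent or by LARS).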
|dkr2t| }dt| | }||
tdtj  7 }t| | |
 }| |||fdfS |d u r;|  }n| }|d9 }| j	d d |
d  }||j	d d |
d < t
|}t|
}d}t }|dkrqtddd	}ntdd
}zRtj}tj|dd dd f dd}t|D ].}t|
D ]}|dkr|d }|| ||k ||< |d d |f ||k |d d |f< n|dd dd f |d d < | |||kf }tjdi |I |dkr|||k|f |||f d|    }t||d|||||td d
\}}	}	}	nt|||j||
d  d|ddd\}	}	}W d    n	1 s'w   Y  d|||f t|||k|f |  |||f< |||f  | |||k|f< |||f  | ||||kf< t||}|||||kf< ||||k|f< qt| stdt| ||}t| ||}|rtd|||f  |||f t||k r nt|s|dkrtdqtd||f t  W n ty } z|j!d d f|_!|d }~ww ||||d fS )Nr   r   r   gffffff?r   r2   raiseignore)overinvalid)r?   C)orderi  FTlars)XyGram	n_samples	alpha_min	copy_Gramr;   methodreturn_pathg      ?z1The system is too ill-conditioned for this solverz<[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3ezANon SPD result: the system is too ill-conditioned for this solverzDgraphical_lasso: did not converge after %i iteration: dual gap: %.3ez3. The system is too ill-conditioned for this solverr,   )"r    r   invr   r!   r"   r#   r%   copyflatpinvharangelistdictinfrangeerrstatecd_fastenet_coordinate_descent_gramr   r	   sizedotisfiniteFloatingPointErrorr1   r.   printappendr$   warningswarnr   args)r/   r)   r5   r6   r7   r8   r9   r:   r;   _
n_featuresr(   r+   d_gapcovariance_diagonalindicesicostserrorssub_covarianceidxdirowcoefser,   r,   r-   _graphical_lassoG   s   





&


rn   c                 C   s4   t | }d|jdd|jd d < t t |S )a  Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        The sample covariance matrix.

    Notes
    -----
    This results from the bound for all the Lasso problems that are solved
    in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
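
    Examples
    --------
    A hand-checkable sketch: the diagonal is ignored, so the result is the
    largest absolute off-diagonal entry.

    >>> import numpy as np
    >>> float(alpha_max(np.array([[1.0, 0.3], [0.3, 2.0]])))
    0.3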
    """
    A = np.copy(emp_cov)
    A.flat[:: A.shape[0] + 1] = 0
    return np.max(np.abs(A))


@validate_params(
    {
        "emp_cov": ["array-like"],
        "return_costs": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=False,
)
def graphical_lasso(
    emp_cov,
    alpha,
    *,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    return_costs=False,
    eps=np.finfo(np.float64).eps,
    return_n_iter=False,
):
    """L1-penalized covariance estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        graph_lasso has been renamed to graphical_lasso

    Parameters
    ----------
    emp_cov : array-like of shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : bool, default=False
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        The estimated covariance matrix.

    precision : ndarray of shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> from sklearn.covariance import empirical_covariance, graphical_lasso
    >>> true_cov = make_sparse_spd_matrix(n_dim=3, random_state=42)
    >>> rng = np.random.RandomState(42)
    >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
    >>> emp_cov = empirical_covariance(X, assume_centered=True)
    >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
    >>> emp_cov
    array([[ 1.68...,  0.21..., -0.20...],
           [ 0.21...,  0.22..., -0.08...],
           [-0.20..., -0.08...,  0.23...]])
    """
    model = GraphicalLasso(
        alpha=alpha,
        mode=mode,
        covariance="precomputed",
        tol=tol,
        enet_tol=enet_tol,
        max_iter=max_iter,
        verbose=verbose,
        eps=eps,
        assume_centered=True,
    ).fit(emp_cov)

    output = [model.covariance_, model.precision_]
    if return_costs:
        output.append(model.costs_)
    if return_n_iter:
        output.append(model.n_iter_)
    return tuple(output)
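

# `graphical_lasso` above is a thin functional wrapper: it delegates to the
# `GraphicalLasso` estimator with `covariance="precomputed"` and
# `assume_centered=True`, so the functional and estimator APIs return the
# same covariance/precision pair for a given empirical covariance.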
class BaseGraphicalLasso(EmpiricalCovariance):
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "tol": [Interval(Real, 0, None, closed="right")],
        "enet_tol": [Interval(Real, 0, None, closed="right")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "mode": [StrOptions({"cd", "lars"})],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0, None, closed="both")],
    }
    _parameter_constraints.pop("store_precision")

    def __init__(
        self,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(assume_centered=assume_centered)
        self.tol = tol
        self.enet_tol = enet_tol
        self.max_iter = max_iter
        self.mode = mode
        self.verbose = verbose
        self.eps = eps


class GraphicalLasso(BaseGraphicalLasso):
    """Sparse inverse covariance estimation with an l1-penalized estimator.

    For a usage example see
    :ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        GraphLasso has been renamed to GraphicalLasso

    Parameters
    ----------
    alpha : float, default=0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    covariance : "precomputed", default=None
        If covariance is "precomputed", the input data in `fit` is assumed
        to be the covariance matrix. If `None`, the empirical covariance
        is estimated from the data `X`.

        .. versionadded:: 1.3

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.

    n_iter_ : int
        Number of iterations run.

    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration.

        .. versionadded:: 1.3

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLasso
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLasso().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.049, 0.218, 0.019],
           [0.049, 0.364, 0.017, 0.034],
           [0.218, 0.017, 0.322, 0.093],
           [0.019, 0.034, 0.093, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
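
    As a quick sanity check, the estimated precision (inverse covariance)
    matrix is exposed as well and has the same shape:

    >>> cov.precision_.shape
    (4, 4)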
    """

    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alpha": [Interval(Real, 0, None, closed="right")],
        "covariance": [StrOptions({"precomputed"}), None],
    }

    def __init__(
        self,
        alpha=0.01,
        *,
        mode="cd",
        covariance=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alpha = alpha
        self.covariance = covariance

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the GraphicalLasso model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Covariance does not make sense for a single feature
        X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2)

        if self.covariance == "precomputed":
            emp_cov = X.copy()
            self.location_ = np.zeros(X.shape[1])
        else:
            emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
            if self.assume_centered:
                self.location_ = np.zeros(X.shape[1])
            else:
                self.location_ = X.mean(0)

        self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
            emp_cov,
            alpha=self.alpha,
            cov_init=None,
            mode=self.mode,
            tol=self.tol,
            enet_tol=self.enet_tol,
            max_iter=self.max_iter,
            verbose=self.verbose,
            eps=self.eps,
        )
        return self


def graphical_lasso_path(
    X,
    alphas,
    cov_init=None,
    X_test=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """l1-penalized covariance estimator along a path of decreasing alphas

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    alphas : array-like of shape (n_alphas,)
        The list of regularization parameters, decreasing order.

    cov_init : array of shape (n_features, n_features), default=None
        The initial guess for the covariance.

    X_test : array of shape (n_test_samples, n_features), default=None
        Optional test matrix to measure generalisation error.

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. The tolerance must be a positive
        number.

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. The tolerance must be a positive number.

    max_iter : int, default=100
        The maximum number of iterations. This parameter should be a strictly
        positive integer.

    verbose : int or bool, default=False
        The higher the verbosity flag, the more information is printed
        during the fitting.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    Returns
    -------
    covariances_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated covariance matrices.

    precisions_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated (sparse) precision matrices.

    scores_ : list of shape (n_alphas,), dtype=float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
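
    Examples
    --------
    A small sketch on synthetic data (checking shapes only, to stay
    independent of the exact fitted values):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(20, 3)
    >>> covs, precs = graphical_lasso_path(X, alphas=[0.1, 0.01])
    >>> len(covs), len(precs)
    (2, 2)
    >>> covs[0].shape
    (3, 3)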
    """
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)

    for alpha in alphas:
        try:
            # Capture the errors, and move on
            covariance_, precision_, _, _ = _graphical_lasso(
                emp_cov,
                alpha=alpha,
                cov_init=covariance_,
                mode=mode,
                tol=tol,
                enet_tol=enet_tol,
                max_iter=max_iter,
                verbose=inner_verbose,
                eps=eps,
            )
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write(".")
        elif verbose > 1:
            if X_test is not None:
                print(
                    "[graphical_lasso_path] alpha: %.2e, score: %.2e"
                    % (alpha, this_score)
                )
            else:
                print("[graphical_lasso_path] alpha: %.2e" % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_


class GraphicalLassoCV(BaseGraphicalLasso):
    """Sparse inverse covariance w/ cross-validated choice of the l1 penalty.

    See glossary entry for :term:`cross-validation estimator`.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        GraphLassoCV has been renamed to GraphicalLassoCV

    Parameters
    ----------
    alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
        If an integer is given, it fixes the number of points on the
        grids of alpha to be used. If a list is given, it gives the
        grid to be used. See the notes in the class docstring for
        more details. Range is [1, inf) for an integer.
        Range is (0, inf] for an array-like of floats.

    n_refinements : int, default=4
        The number of times the grid is refined. Not used if explicit
        values of alphas are passed. Range is [1, inf).

    cv : int, cross-validation generator or iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the default 5-fold cross-validation,
        - integer, to specify the number of folds.
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. versionchanged:: 0.20
            ``cv`` default value if None changed from 3-fold to 5-fold.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        Maximum number of iterations.

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.

    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

        .. versionchanged:: v0.20
           `n_jobs` default changed from 1 to None

    verbose : bool, default=False
        If verbose is True, the objective function and duality gap are
        printed at each iteration.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

        .. versionadded:: 1.3

    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.

    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated precision matrix (inverse covariance).

    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration.

        .. versionadded:: 1.3

    alpha_ : float
        Penalization parameter selected.

    cv_results_ : dict of ndarrays
        A dict with keys:

        alphas : ndarray of shape (n_alphas,)
            All penalization parameters explored.

        split(k)_test_score : ndarray of shape (n_alphas,)
            Log-likelihood score on left-out data across (k)th fold.

            .. versionadded:: 1.0

        mean_test_score : ndarray of shape (n_alphas,)
            Mean of scores over the folds.

            .. versionadded:: 1.0

        std_test_score : ndarray of shape (n_alphas,)
            Standard deviation of scores over the folds.

            .. versionadded:: 1.0

    n_iter_ : int
        Number of iterations run for the optimal alpha.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.

    Notes
    -----
    The search for the optimal penalization parameter (`alpha`) is done on an
    iteratively refined grid: first the cross-validated scores on a grid are
    computed, then a new refined grid is centered around the maximum, and so
    on.

    One of the challenges faced here is that the solvers can
    fail to converge to a well-conditioned estimate. The corresponding
    values of `alpha` then come out as missing values, but the optimum may
    be close to these missing values.

    In `fit`, once the best parameter `alpha` is found through
    cross-validation, the model is fit again using the entire training set.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLassoCV
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLassoCV().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.051, 0.22 , 0.017],
           [0.051, 0.364, 0.018, 0.036],
           [0.22 , 0.018, 0.322, 0.094],
           [0.017, 0.036, 0.094, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
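
    The selected penalty and the cross-validation results are exposed after
    fitting:

    >>> bool(cov.alpha_ > 0)
    True
    >>> "mean_test_score" in cov.cv_results_
    True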
    """

    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alphas": [Interval(Integral, 0, None, closed="neither"), "array-like"],
        "n_refinements": [Interval(Integral, 1, None, closed="left")],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        *,
        alphas=4,
        n_refinements=4,
        cv=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        n_jobs=None,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alphas = alphas
        self.n_refinements = n_refinements
        self.cv = cv
        self.n_jobs = n_jobs

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, **params):
        """Fit the GraphicalLasso covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.

        y : Ignored
            Not used, present for API consistency by convention.

        **params : dict, default=None
            Parameters to be passed to the CV splitter and the
            cross_val_score function.

            .. versionadded:: 1.5
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        _raise_for_params(params, self, "fit")

        # Covariance does not make sense for a single feature
        X = validate_data(self, X, ensure_min_features=2)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)

        cv = check_cv(self.cv, y, classifier=False)

        # List of (alpha, scores, covs)
        path = list()
        n_alphas = self.alphas
        inner_verbose = max(0, self.verbose - 1)

        if _is_arraylike_not_scalar(n_alphas):
            for alpha in self.alphas:
                check_scalar(
                    alpha,
                    "alpha",
                    Real,
                    min_val=0,
                    max_val=np.inf,
                    include_boundaries="right",
                )
            alphas = self.alphas
            n_refinements = 1
        else:
            n_refinements = self.n_refinements
            alpha_1 = alpha_max(emp_cov)
            alpha_0 = 1e-2 * alpha_1
            alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1), n_alphas)[::-1]

        if _routing_enabled():
            routed_params = process_routing(self, "fit", **params)
        else:
            routed_params = Bunch(splitter=Bunch(split={}))

        t0 = time.time()
        for i in range(n_refinements):
            with warnings.catch_warnings():
                # No need to see the convergence warnings on this grid:
                # they will always be points that will not converge
                # during the cross-validation
                warnings.simplefilter("ignore", ConvergenceWarning)
                # Compute the cross-validated loss on the current grid
                this_path = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                    delayed(graphical_lasso_path)(
                        X[train],
                        alphas=alphas,
                        X_test=X[test],
                        mode=self.mode,
                        tol=self.tol,
                        enet_tol=self.enet_tol,
                        max_iter=int(0.1 * self.max_iter),
                        verbose=inner_verbose,
                        eps=self.eps,
                    )
                    for train, test in cv.split(X, y, **routed_params.splitter.split)
                )

            # Little dance to transform the list in what we need
            covs, _, scores = zip(*this_path)
            covs = zip(*covs)
            scores = zip(*scores)
            path.extend(zip(alphas, scores, covs))
            path = sorted(path, key=operator.itemgetter(0), reverse=True)

            # Find the maximum (avoid using built in 'max' function to
            # have a fully-reproducible selection of the smallest alpha
            # in case of equality)
            best_score = -np.inf
            last_finite_idx = 0
            for index, (alpha, scores, _) in enumerate(path):
                this_score = np.mean(scores)
                if this_score >= 0.1 / np.finfo(np.float64).eps:
                    this_score = np.nan
                if np.isfinite(this_score):
                    last_finite_idx = index
                if this_score >= best_score:
                    best_score = this_score
                    best_index = index

            # Refine the grid
            if best_index == 0:
                # We do not need to go back: we have chosen
                # the highest value of alpha for which there are
                # non-zero coefficients
                alpha_1 = path[0][0]
                alpha_0 = path[1][0]
            elif best_index == last_finite_idx and not best_index == len(path) - 1:
                # We have non-converged models on the upper bound of the
                # grid, we need to refine the grid there
                alpha_1 = path[best_index][0]
                alpha_0 = path[best_index + 1][0]
            elif best_index == len(path) - 1:
                alpha_1 = path[best_index][0]
                alpha_0 = 0.01 * path[best_index][0]
            else:
                alpha_1 = path[best_index - 1][0]
                alpha_0 = path[best_index + 1][0]

            if not _is_arraylike_not_scalar(n_alphas):
                alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0), n_alphas + 2)
                alphas = alphas[1:-1]

            if self.verbose and n_refinements > 1:
                print(
                    "[GraphicalLassoCV] Done refinement % 2i out of %i: % 3is"
                    % (i + 1, n_refinements, time.time() - t0)
                )

        path = list(zip(*path))
        grid_scores = list(path[1])
        alphas = list(path[0])
        # Finally, compute the score with alpha = 0
        alphas.append(0)
        grid_scores.append(
            cross_val_score(
                EmpiricalCovariance(),
                X,
                cv=cv,
                n_jobs=self.n_jobs,
                verbose=inner_verbose,
                params=params,
            )
        )
        grid_scores = np.array(grid_scores)

        self.cv_results_ = {"alphas": np.asarray(alphas)}
        for i in range(grid_scores.shape[1]):
            self.cv_results_[f"split{i}_test_score"] = grid_scores[:, i]
        self.cv_results_["mean_test_score"] = np.mean(grid_scores, axis=1)
        self.cv_results_["std_test_score"] = np.std(grid_scores, axis=1)

        best_alpha = alphas[best_index]
        self.alpha_ = best_alpha

        # Finally fit the model with the selected alpha
        self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
            emp_cov,
            alpha=best_alpha,
            mode=self.mode,
            tol=self.tol,
            enet_tol=self.enet_tol,
            max_iter=self.max_iter,
            verbose=inner_verbose,
            eps=self.eps,
        )
        return self

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.5

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        )ownerr   r|   )calleecaller)r   method_mapping)r   r   r   addr
   r   r   )r   routerr,   r,   r-   get_metadata_routingb  s
   z%GraphicalLassoCV.get_metadata_routingr   )r   r   r   r   r   r   r   r   rP   r   r!   r   r   r;   r   r   r|   r   r   r,   r,   r   r-   r     s6   
  7 ;r   );r   r   r   r   r\   numbersr   r   numpyr!   scipyr   baser   
exceptionsr   linear_modelr   rT   r	   model_selectionr
   r   utilsr   utils._param_validationr   r   r   utils.metadata_routingr   r   r   r   r   utils.parallelr   r   utils.validationr   r   r   r    r   r   r   r.   r1   r   r   r;   rn   rq   r   r   r{   r   r   r,   r,   r,   r-   <module>   s|       G
 