o
    \hp                     @   sZ  d dl Z d dlmZmZ d dlmZmZ d dlZd dl	m
Z ddlmZmZmZ ddlmZmZ ddlmZ ddlmZmZmZmZ dd	lmZmZ dd
lmZ ddlm Z  ddl!m"Z"m#Z# ddl$m%Z%m&Z&m'Z'm(Z(m)Z)m*Z* ddl+m,Z- ddl+m.Z/ ddl+m0Z1 g dZ2dd Z3G dd deedZ4G dd dee4edZ5dd Z6					d!dd Z7dS )"    N)ABCMetaabstractmethod)IntegralReal   )BaseEstimatorClassifierMixin_fit_context)ConvergenceWarningNotFittedError)LabelEncoder)check_arraycheck_random_statecolumn_or_1dcompute_class_weight)Interval
StrOptions)safe_sparse_dot)available_if)_ovr_decision_functioncheck_classification_targets)_check_large_sparse_check_sample_weight_num_samplescheck_consistent_lengthcheck_is_fittedvalidate_data   )
_liblinear)_libsvm)_libsvm_sparse)c_svcnu_svc	one_classepsilon_svrnu_svrc                 C   s   | j d d }g }ttdg|g}t|D ]W}||| ||d  ddf }t|d |D ]=}||| ||d  ddf }	| |d || ||d  f }
| ||| ||d  f }|t|
|t||	  q1q|S )zGenerate primal coefficients from dual coefficients
    for the one-vs-one multi class LibSVM in the case
    of a linear kernel.r   r   N)shapenpcumsumhstackrangeappendr   )	dual_coef	n_supportsupport_vectorsn_classcoefsv_locsclass1sv1class2sv2alpha1alpha2 r8   T/home/air/segue/gemini/backup/venv/lib/python3.10/site-packages/sklearn/svm/_base.py_one_vs_one_coef%   s   	   r:   c                       s  e Zd ZU dZeh degeeddddgeddheed	dddgeeddd
dgeed	dd
dgeed	dddgeed	dddgeed	dddgdgdgeeddd
dgedhe	dgdgeeddddgdgdZ
e	ed< g dZedd Z fddZeddd;ddZdd Zdd  Zd!d" Zd#d$ Zd%d& Zd'd( Zd)d* Zd+d, Zd-d. Zd/d0 Zd1d2 Zd3d4 Zed5d6 Zd7d8 Z ed9d: Z!  Z"S )<
BaseLibSVMzBase class for estimators that use libsvm as backing library.

    This implements support vector machine classification and regression.

    Parameter documentation is in the derived `SVC` class.
    >   rbfpolylinearsigmoidprecomputedr   Nleft)closedscaleauto        neitherright      ?booleanbalancedverboserandom_statekerneldegreegammacoef0tolCnuepsilon	shrinkingprobability
cache_sizeclass_weightrK   max_iterrM   _parameter_constraints)r>   r=   r<   r?   r@   c                 C   sz   | j tvrtdt| j f || _|| _|| _|| _|| _|| _|| _	|| _
|	| _|
| _|| _|| _|| _|| _|| _d S )Nz&impl should be one of %s, %s was given)_implLIBSVM_IMPL
ValueErrorrO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   rK   r[   rM   )selfrO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   rK   r[   rM   r8   r8   r9   __init__j   s&   

zBaseLibSVM.__init__c                    s*   t   }| jdk|j_| jdk|j_|S Nr@   )super__sklearn_tags__rO   
input_tagspairwisesparser`   tags	__class__r8   r9   rd      s   
zBaseLibSVM.__sklearn_tags__T)prefer_skip_nested_validationc              	   C   s  t | j}t|}|r| jdkrtd|ot| j | _t| jr)t|| nt	| ||t
jdddd\}}| |}t
j|du rDg n|t
jd}t| j}t|}|d	krl||jd
 krltdd||jd
 f  | jdkr||jd krtd|jd
 |jd |jd
 d
kr|jd
 |krtd|j|jf t| jrdn| j}|dkrd| _nGt| jtr| jdkr|r|| | d	  n| }	|	d
krd|jd |	  nd| _n| jdkrd|jd  | _n
t| jtr| j| _| jr| jn| j}
| jr
t ddd |!t
"dj#}|
||||||d t$|dr&|jn|f| _%| j&' | _(| j)| _*| jdv rNt+| j,d	krN|  j&d9  _&| j) | _)| jrV| j*j-n| j*}t
.| j(/ }t
.|/ }|rn|srtd| jdv r~| j0| _1| S | j02 | _1| S )a  Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)                 or (n_samples, n_samples)
            Training vectors, where `n_samples` is the number of samples
            and `n_features` is the number of features.
            For kernel="precomputed", the expected shape of X is
            (n_samples, n_samples).

        y : array-like of shape (n_samples,)
            Target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Fitted estimator.

        Notes
        -----
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        r@   z-Sparse precomputed kernels are not supported.rT   csrF)dtypeorderaccept_sparseaccept_large_sparseNrn   r   r   z"X and y have incompatible shapes.
zX has %s samples, but y has %s.r   zDPrecomputed matrix must be a square matrix. Input is a {}x{} matrix.zsample_weight and X have incompatible shapes: %r vs %r
Note: Sparse matrices cannot be indexed w/boolean masks (use `indices=True` in CV).rE   rC   rH   rD   z[LibSVM] endi)random_seedr&   r!   r"   rL   zxThe dual coefficients or intercepts are not finite. The input data may contain large values and need to be preprocessed.)3r   rM   spissparserO   	TypeErrorcallable_sparser   r   r'   float64_validate_targetsasarrayr^   indexr]   r   r&   r_   format_gamma
isinstancerQ   strmultiplymeanvarr   _sparse_fit
_dense_fitrK   printrandintiinfomaxhasattr
shape_fit_
intercept_copy_intercept_
dual_coef__dual_coef_lenclasses_dataisfiniteall	_num_itern_iter_item)r`   Xysample_weightrndrg   solver_type	n_samplesrO   X_varfitseedr,   intercept_finitenessdual_coef_finitenessr8   r8   r9   r      s   
"






&"

zBaseLibSVM.fitc                 C   s   t |ddjtjddS )zxValidation of y and class_weight.

        Default implementation for SVR and one-class; overridden in BaseSVC.
        TwarnF)r   )r   astyper'   r~   )r`   r   r8   r8   r9   r   '  s   zBaseLibSVM._validate_targetsc                 C   s2   | j dv sJ | j dkrtd| j t d S d S )Nr   r   r   znSolver terminated early (max_iter=%i).  Consider pre-processing your data with StandardScaler or MinMaxScaler.)fit_status_warningsr   r[   r
   r`   r8   r8   r9   _warn_from_fit_status.  s   
z BaseLibSVM._warn_from_fit_statusc              
   C   s  t | jr|| _| |}|jd |jd krtdt| j tj	||fi d|d|dt
| dtdd|d	| jd
| jd| jd| jd| jd| jd| jd| jd| jd| jd| jd|\	| _| _| _| _| _| _| _| _| _ | !  d S )Nr   r   z(X.shape[0] should be equal to X.shape[1]svm_typer   rZ   class_weight_rO   rT   rU   rX   rP   rW   rS   rY   rR   rQ   rV   r[   rw   )"r|   rO   _BaseLibSVM__Xfit_compute_kernelr&   r_   libsvmset_verbosity_wraprK   r   getattrr'   emptyrT   rU   rX   rP   rW   rS   rY   rR   r   rV   r[   support_support_vectors_
_n_supportr   r   _probA_probBr   r   r   )r`   r   r   r   r   rO   rw   r8   r8   r9   r   8  sj   

	
zBaseLibSVM._dense_fitc                 C   sP  t j|jt jdd|_|  | j|}t| j	 t
|jd |j|j|j|||| j| j| j| j| jt| dt d|| j| j| jt| jt| j| j|\	| _| _}| _| _| _ | _!| _"| _#| $  t%| drrt&| j'd }	nd}	| jjd }
t (t )|
|	}|
st*+g | _,d S t )d|j-d |j-|	 }t*+|||f|	|
f| _,d S )NrT   rn   ro   r   r   r   r   ).r'   r   r   r~   sort_indices_sparse_kernelsr   libsvm_sparser   rK   libsvm_sparse_trainr&   indicesindptrrP   r   rR   rS   rT   r   r   rU   rY   rV   intrW   rX   r[   r   r   r   r   r   r   r   r   r   r   r   r   tilearangery   
csr_matrixr   size)r`   r   r   r   r   rO   rw   kernel_typedual_coef_datar/   n_SVdual_coef_indicesdual_coef_indptrr8   r8   r9   r   g  sf   

zBaseLibSVM._sparse_fitc                 C   s$   |  |}| jr| jn| j}||S )a  Perform regression on samples in X.

        For an one-class model, +1 (inlier) or -1 (outlier) is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            The predicted values.
        )_validate_for_predictr}   _sparse_predict_dense_predict)r`   r   predictr8   r8   r9   r     s   
zBaseLibSVM.predictc                 C   s   |  |}|jdkrt|ddd}| j}t| jr3d}|jd | jd kr3td|jd | jd f t	| j
}tj|| j| j| j| j| j| j| j||| j| j| j| jdS )	Nr   rT   F)ro   rq   r@   r   MX.shape[1] = %d should be equal to %d, the number of samples at training time)r   rO   rP   rR   rQ   rY   )r   ndimr   rO   r|   r&   r   r_   r^   r   r]   r   r   r   r   r   r   r   r   r   rP   rR   r   rY   )r`   r   rO   r   r8   r8   r9   r     s:   


zBaseLibSVM._dense_predictc                 C   s   | j }t|r	d}| j|}d}t|j|j|j| j	j| j	j| j	j| j
j| jt| j|| j| j| j| j|t| dtd| j| j| j| j| j| j| jS )Nr@   rE   r   r   )rO   r|   r   r   r   libsvm_sparse_predictr   r   r   r   r   r   r^   r]   rP   r   rR   rS   r   r'   r   rU   rV   rW   rX   r   r   r   )r`   r   rO   r   rT   r8   r8   r9   r     s<   
zBaseLibSVM._sparse_predictc                 C   s@   t | jr| || j}t|r| }tj|tjdd}|S )z0Return the data transformed by a callable kernelrT   r   )	r|   rO   r   ry   rz   toarrayr'   r   r~   r`   r   rO   r8   r8   r9   r     s   

zBaseLibSVM._compute_kernelc                 C   sV   |  |}| |}| jr| |}n| |}| jdv r)t| jdkr)|  S |S )af  Evaluates the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        rx   r   )	r   r   r}   _sparse_decision_function_dense_decision_functionr]   r   r   ravel)r`   r   dec_funcr8   r8   r9   _decision_function	  s   



zBaseLibSVM._decision_functionc                 C   sh   t |tjddd}| j}t|rd}tj|| j| j| j	| j
| j| j| jt| j|| j| j| j| jdS )NrT   F)rn   ro   rq   r@   r   rO   rP   rY   rR   rQ   )r   r'   r~   rO   r|   r   decision_functionr   r   r   r   r   r   r   r^   r   r]   rP   rY   rR   r   r   r8   r8   r9   r   '  s(   
z#BaseLibSVM._dense_decision_functionc                 C   s   t j|jt jdd|_| j}t|drd}| j|}t	|j|j
|j| jj| jj
| jj| jj| jt| j|| j| j| j| j| jt| dt d| j| j| j| j| j| j| jS )NrT   r   __call__r@   r   r   )r'   r   r   r~   rO   r   r   r   r   libsvm_sparse_decision_functionr   r   r   r   r   r^   r]   rP   r   rR   rS   rT   r   r   rU   rV   rW   rX   r   r   r   r`   r   rO   r   r8   r8   r9   r   ?  s<   

z$BaseLibSVM._sparse_decision_functionc              	   C   s   t |  t| jst| |dtjdddd}| jr"t|s"t	|}| jr)|
  t|r?| js?t| js?tdt| j | jdkr\|jd | jd kr\td	|jd | jd f | j}| js{|jdkr{| j |jd kr{td
| jj d|S )Nrm   rT   F)rp   rn   ro   rq   resetz3cannot use sparse input in %r trained on dense datar@   r   r   r   zThe internal representation of z was altered)r   r|   rO   r   r'   r~   r}   ry   rz   r   r   r_   type__name__r&   r   r   r   
n_support_sumrk   )r`   r   svr8   r8   r9   r   b  sD   



$z BaseLibSVM._validate_for_predictc                 C   s>   | j dkr	td|  }t|rd|jj_|S d|j_|S )zWeights assigned to the features when `kernel="linear"`.

        Returns
        -------
        ndarray of shape (n_features, n_classes)
        r>   z2coef_ is only available when using a linear kernelF)rO   AttributeError	_get_coefry   rz   r   flags	writeabler`   r0   r8   r8   r9   coef_  s   


zBaseLibSVM.coef_c                 C   s   t | j| jS N)r   r   r   r   r8   r8   r9   r     s   zBaseLibSVM._get_coefc                 C   sL   zt |  W n	 ty   tw t| j}|dv r| jS t| jd gS )z)Number of support vectors for each class.r   r   )	r   r   r   r^   r   r]   r   r'   array)r`   r   r8   r8   r9   r     s   zBaseLibSVM.n_support_r   )#r   
__module____qualname____doc__r   r|   r   r   r   dictr\   __annotations__r   r   ra   rd   r	   r   r   r   r   r   r   r   r   r   r   r   r   r   propertyr   r   r   __classcell__r8   r8   rj   r9   r;   E   s\   
 
	

' 
/="$#)
r;   )	metaclassc                       s   e Zd ZU dZi ejeddhgdgdZeed< dD ]Z	e
e	 qe fdd	Zd
d Zdd Z fddZdd Zeedd Zeedd Zdd Zdd Zdd Zedd Zedd Z fd d!Z  ZS )"BaseSVCz!ABC for LibSVM-based classifiers.ovrovorI   )decision_function_shape
break_tiesr\   )rV   rU   c                    s:   || _ || _t j|||||||d||	|
||||d d S )NrE   rN   )r   r   rc   ra   )r`   rO   rP   rQ   rR   rS   rT   rU   rW   rX   rY   rZ   rK   r[   r   rM   r   rj   r8   r9   ra     s&   
zBaseSVC.__init__c                 C   sl   t |dd}t| tj|dd\}}t| j||d| _t|dk r*tdt| || _	tj
|tjddS )	NTr   )return_inverseclassesr   r   z>The number of classes has to be greater than one; got %d classrT   r   )r   r   r'   uniquer   rZ   r   r   r_   r   r   r~   )r`   r   y_clsr8   r8   r9   r     s   zBaseSVC._validate_targetsc                 C   s>   |  |}| jdkrt| jdkrt|dk | t| jS |S )a4  Evaluate the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
            If decision_function_shape='ovr', the shape is (n_samples,
            n_classes).

        Notes
        -----
        If decision_function_shape='ovo', the function values are proportional
        to the distance of the samples X to the separating hyperplane. If the
        exact distances are required, divide the function values by the norm of
        the weight vector (``coef_``). See also `this question
        <https://stats.stackexchange.com/questions/14876/
        interpreting-distance-from-hyperplane-in-svm>`_ for further details.
        If decision_function_shape='ovr', the decision function is a monotonic
        transformation of ovo decision function.
        r   r   r   )r   r   r   r   r   )r`   r   decr8   r8   r9   r     s   
zBaseSVC.decision_functionc                    sx   t |  | jr| jdkrtd| jr*| jdkr*t| jdkr*tj| |dd}nt	 
|}| jtj|tjdS )a  Perform classification on samples in X.

        For an one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or                 (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            Class labels for samples in X.
        r   z>break_ties must be False when decision_function_shape is 'ovo'r   r   r   )axisrr   )r   r   r   r_   r   r   r'   argmaxr   rc   r   taker   intp)r`   r   r   rj   r8   r9   r     s   
zBaseSVC.predictc                 C   s$   | j std| jdvrtddS )Nz5predict_proba is not available when probability=Falserx   z0predict_proba only implemented for SVC and NuSVCT)rX   r   r]   r   r8   r8   r9   _check_proba=  s   
zBaseSVC._check_probac                 C   sD   |  |}| jjdks| jjdkrtd| jr| jn| j}||S )a  Compute probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        r   zApredict_proba is not available when fitted with probability=False)r   probA_r   probB_r   r}   _sparse_predict_proba_dense_predict_proba)r`   r   
pred_probar8   r8   r9   predict_probaF  s   
zBaseSVC.predict_probac                 C   s   t | |S )a  Compute log probabilities of possible outcomes for samples in X.

        The model need to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or                 (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the log-probabilities of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        )r'   logr  )r`   r   r8   r8   r9   predict_log_probak  s   zBaseSVC.predict_log_probac                 C   sh   |  |}| j}t|rd}t| j}tj|| j| j	| j
| j| j| j| j||| j| j| j| jd}|S )Nr@   r   )r   rO   r|   r^   r   r]   r   r  r   r   r   r   r   r   r   rP   rY   rR   r   )r`   r   rO   r   pprobr8   r8   r9   r    s,   
zBaseSVC._dense_predict_probac                 C   s   t j|jt jdd|_| j}t|rd}| j|}t	|j|j
|j| jj| jj
| jj| jj| jt| j|| j| j| j| j| jt| dt d| j| j| j| j| j| j| jS )NrT   r   r@   r   r   )r'   r   r   r~   rO   r|   r   r   r   libsvm_sparse_predict_probar   r   r   r   r   r^   r]   rP   r   rR   rS   rT   r   r   rU   rV   rW   rX   r   r   r   r   r8   r8   r9   r    s<   
zBaseSVC._sparse_predict_probac                 C   sb   | j jd dkrt| j | j}|S t| j | j| j}t|d r*t|	 }|S t
|}|S )Nr   r   )r   r&   r   r   r:   r   ry   rz   vstacktocsrr'   r   r8   r8   r9   r     s   
zBaseSVC._get_coefc                 C      | j S zParameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        )r   r   r8   r8   r9   r	       zBaseSVC.probA_c                 C   r  r  )r   r   r8   r8   r9   r
    r  zBaseSVC.probB_c                    s   t   }| jdk|j_|S rb   )rc   rd   rO   re   rg   rh   rj   r8   r9   rd     s   
zBaseSVC.__sklearn_tags__)r   r   r   r   r;   r\   r   r   r   unused_parampopr   ra   r   r   r   r  r   r  r  r  r  r   r   r	  r
  rd   r   r8   r8   rj   r9   r     s8   
 ' %	
$
#
	
	r   c           	      C   s   ddiddddddd	iidd
idddddddiiddddidd}| dkr.||  S | dkr8t d|  ||d}|du rGd| }n%||d}|du rXd||f }n||d}|du rjd|||f }n|S t d||||f )a  Find the liblinear magic number for the solver.

    This number depends on the values of the following attributes:
      - multi_class
      - penalty
      - loss
      - dual

    The same number is also internally used by LibLinear to determine
    which solver to use.
    F   r      )FT)l1l2r  T      r   r               )logistic_regressionhingesquared_hingeepsilon_insensitivesquared_epsilon_insensitivecrammer_singerr)  r   z<`multi_class` must be one of `ovr`, `crammer_singer`, got %rNzloss='%s' is not supportedz>The combination of penalty='%s' and loss='%s' is not supportedzLThe combination of penalty='%s' and loss='%s' are not supported when dual=%szJUnsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r)r_   get)	multi_classpenaltylossdual_solver_type_dict_solver_penerror_string_solver_dual
solver_numr8   r8   r9   _get_liblinear_solver_type  sD   

	

r4  r   r$  皙?c                 C   s  |dvr%t  }||}|j}t|dk rtd|d  t|||d}n
tjdtjd}|}t	
| t|}|r@tddd	 d
}|rP|dkrNtd| |}t
| t
| t	
| t| rht|  tj|tjd }tj|dd}t|| tjd}t||||}t	| |t| ||
||||	|tdj||\}}t|}||	krtdt |r|ddddf }||dddf  }n|}d}|||fS )a  Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.

    Preprocessing is done in this function before supplying it to liblinear.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X

    C : float
        Inverse of cross-validation parameter. The lower the C, the higher
        the penalization.

    fit_intercept : bool
        Whether or not to fit an intercept. If set to True, the feature vector
        is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
        1 corresponds to the intercept. If set to False, no intercept will be
        used in calculations (i.e. data is expected to be already centered).

    intercept_scaling : float
        Liblinear internally penalizes the intercept, treating it like any
        other term in the feature vector. To reduce the impact of the
        regularization on the intercept, the `intercept_scaling` parameter can
        be set to a value greater than 1; the higher the value of
        `intercept_scaling`, the lower the impact of regularization on it.
        Then, the weights become `[w_x_1, ..., w_x_n,
        w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
        the feature weights and the intercept weight is scaled by
        `intercept_scaling`. This scaling allows the intercept term to have a
        different regularization behavior compared to the other features.

    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    penalty : {'l1', 'l2'}
        The norm of the penalty used in regularization.

    dual : bool
        Dual or primal formulation,

    verbose : int
        Set verbose to any positive number for verbosity.

    max_iter : int
        Number of iterations.

    tol : float
        Stopping condition.

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo random number generation for shuffling the data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'ovr', 'crammer_singer'}, default='ovr'
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from an theoretical perspective
        as it is consistent it is seldom used in practice and rarely leads to
        better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    loss : {'logistic_regression', 'hinge', 'squared_hinge',             'epsilon_insensitive', 'squared_epsilon_insensitive},             default='logistic_regression'
        The loss function used to fit the model.

    epsilon : float, default=0.1
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    sample_weight : array-like of shape (n_samples,), default=None
        Weights assigned to each sample.

    Returns
    -------
    coef_ : ndarray of shape (n_features, n_features + 1)
        The coefficient vector got by minimizing the objective function.

    intercept_ : float
        The intercept term added to the vector.

    n_iter_ : array of int
        Number of iterations run across for each class.
    )r'  r(  r   zeThis solver needs samples of at least 2 classes in the data, but the data contains only one class: %rr   r   rr   z[LibLinear]rs   rt   g      zqIntercept scaling is %r but needs to be greater than 0. To disable fitting an intercept, set fit_intercept=False.W)requirementsrv   z@Liblinear failed to converge, increase the number of iterations.NrL   rE   )r   fit_transformr   r   r_   r   r'   r   r~   	liblinearr   r   r   r   r   ry   rz   r   r   r   requirer   r4  
train_wrapr   r   r   r   r   r
   )r   r   rT   fit_interceptintercept_scalingrZ   r,  r.  rK   r[   rS   rM   r+  r-  rV   r   ency_indr   r   r   biasr   	raw_coef_r   
n_iter_maxr   r   r8   r8   r9   _fit_liblinear*  sz   t






rC  )Nr   r$  r5  N)8r   abcr   r   numbersr   r   numpyr'   scipy.sparserg   ry   baser   r   r	   
exceptionsr
   r   preprocessingr   utilsr   r   r   r   utils._param_validationr   r   utils.extmathr   utils.metaestimatorsr   utils.multiclassr   r   utils.validationr   r   r   r   r   r   rs   r   r9  r   r   r    r   r^   r:   r;   r   r4  rC  r8   r8   r8   r9   <module>   sD         u  =E