Columns:
repository : string (11 distinct values)
repo_id : string (length 1 to 3)
target_module_path : string (length 16 to 72)
prompt : string (length 298 to 21.7k)
relavent_test_path : string (length 50 to 99)
full_function : string (length 336 to 33.8k)
function_name : string (length 2 to 51)
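The rows below follow this schema, one record per target function. For orientation only (not part of the dataset), here is a minimal sketch of loading and inspecting a dataset with these columns using the `datasets` library; the Hub identifier is a hypothetical placeholder.

```python
# Minimal loading/inspection sketch. "org/sklearn-failed-tests" is a
# hypothetical placeholder, not the dataset's real Hub ID.
from datasets import load_dataset

ds = load_dataset("org/sklearn-failed-tests", split="train")

row = ds[0]
print(row["repository"], row["repo_id"], row["function_name"])
print(row["target_module_path"])    # module defining the target function
print(row["relavent_test_path"])    # test file that exercises it
print(row["prompt"][:120])          # signature + docstring shown to the model
print(row["full_function"][:120])   # reference implementation
```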
repository: scikit-learn
repo_id: 122
target_module_path: sklearn/neighbors/_nca.py
prompt:
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0): """Compute the loss and the loss gradient w.r.t. `transformation`. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The raveled linear transformation on which to compute loss and evaluate gradient. X : ndarray of shape (n_samples, n_features) The training samples. same_class_mask : ndarray of shape (n_samples, n_samples) A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong to the same class, and `0` otherwise. Returns ------- loss : float The loss computed for the given transformation. gradient : ndarray of shape (n_components * n_features,) The new (flattened) gradient of the loss. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_NeighborhoodComponentsAnalysis._loss_grad_lbfgs.txt
full_function:
def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0): """Compute the loss and the loss gradient w.r.t. `transformation`. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The raveled linear transformation on which to compute loss and evaluate gradient. X : ndarray of shape (n_samples, n_features) The training samples. same_class_mask : ndarray of shape (n_samples, n_samples) A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong to the same class, and `0` otherwise. Returns ------- loss : float The loss computed for the given transformation. gradient : ndarray of shape (n_components * n_features,) The new (flattened) gradient of the loss. """ if self.n_iter_ == 0: self.n_iter_ += 1 if self.verbose: header_fields = ["Iteration", "Objective Value", "Time(s)"] header_fmt = "{:>10} {:>20} {:>10}" header = header_fmt.format(*header_fields) cls_name = self.__class__.__name__ print("[{}]".format(cls_name)) print( "[{}] {}\n[{}] {}".format( cls_name, header, cls_name, "-" * len(header) ) ) t_funcall = time.time() transformation = transformation.reshape(-1, X.shape[1]) X_embedded = np.dot(X, transformation.T) # (n_samples, n_components) # Compute softmax distances p_ij = pairwise_distances(X_embedded, squared=True) np.fill_diagonal(p_ij, np.inf) p_ij = softmax(-p_ij) # (n_samples, n_samples) # Compute loss masked_p_ij = p_ij * same_class_mask p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1) loss = np.sum(p) # Compute gradient of loss w.r.t. `transform` weighted_p_ij = masked_p_ij - p_ij * p weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0)) gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X) # time complexity of the gradient: O(n_components x n_samples x ( # n_samples + n_features)) if self.verbose: t_funcall = time.time() - t_funcall values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}" print( values_fmt.format( self.__class__.__name__, self.n_iter_, loss, t_funcall ) ) sys.stdout.flush() return sign * loss, sign * gradient.ravel()
function_name: NeighborhoodComponentsAnalysis._loss_grad_lbfgs

repository: scikit-learn
repo_id: 123
target_module_path: sklearn/neighbors/_nca.py
prompt:
def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_NeighborhoodComponentsAnalysis.fit.txt
full_function:
def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object Fitted estimator. """ # Validate the inputs X and y, and converts y to numerical classes. X, y = validate_data(self, X, y, ensure_min_samples=2) check_classification_targets(y) y = LabelEncoder().fit_transform(y) # Check the preferred dimensionality of the projected space if self.n_components is not None and self.n_components > X.shape[1]: raise ValueError( "The preferred dimensionality of the " f"projected space `n_components` ({self.n_components}) cannot " "be greater than the given data " f"dimensionality ({X.shape[1]})!" ) # If warm_start is enabled, check that the inputs are consistent if ( self.warm_start and hasattr(self, "components_") and self.components_.shape[1] != X.shape[1] ): raise ValueError( f"The new inputs dimensionality ({X.shape[1]}) does not " "match the input dimensionality of the " f"previously learned transformation ({self.components_.shape[1]})." ) # Check how the linear transformation should be initialized init = self.init if isinstance(init, np.ndarray): init = check_array(init) # Assert that init.shape[1] = X.shape[1] if init.shape[1] != X.shape[1]: raise ValueError( f"The input dimensionality ({init.shape[1]}) of the given " "linear transformation `init` must match the " f"dimensionality of the given inputs `X` ({X.shape[1]})." ) # Assert that init.shape[0] <= init.shape[1] if init.shape[0] > init.shape[1]: raise ValueError( f"The output dimensionality ({init.shape[0]}) of the given " "linear transformation `init` cannot be " f"greater than its input dimensionality ({init.shape[1]})." ) # Assert that self.n_components = init.shape[0] if self.n_components is not None and self.n_components != init.shape[0]: raise ValueError( "The preferred dimensionality of the " f"projected space `n_components` ({self.n_components}) does" " not match the output dimensionality of " "the given linear transformation " f"`init` ({init.shape[0]})!" ) # Initialize the random generator self.random_state_ = check_random_state(self.random_state) # Measure the total training time t_train = time.time() # Compute a mask that stays fixed during optimization: same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] # (n_samples, n_samples) # Initialize the transformation transformation = np.ravel(self._initialize(X, y, init)) # Create a dictionary of parameters to be passed to the optimizer disp = self.verbose - 2 if self.verbose > 1 else -1 optimizer_params = { "method": "L-BFGS-B", "fun": self._loss_grad_lbfgs, "args": (X, same_class_mask, -1.0), "jac": True, "x0": transformation, "tol": self.tol, "options": dict(maxiter=self.max_iter, disp=disp), "callback": self._callback, } # Call the optimizer self.n_iter_ = 0 opt_result = minimize(**optimizer_params) # Reshape the solution found by the optimizer self.components_ = opt_result.x.reshape(-1, X.shape[1]) # Stop timer t_train = time.time() - t_train if self.verbose: cls_name = self.__class__.__name__ # Warn the user if the algorithm did not converge if not opt_result.success: warn( "[{}] NCA did not converge: {}".format( cls_name, opt_result.message ), ConvergenceWarning, ) print("[{}] Training took {:8.2f}s.".format(cls_name, t_train)) return self
function_name: NeighborhoodComponentsAnalysis.fit

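The two records above cover `NeighborhoodComponentsAnalysis._loss_grad_lbfgs` and `NeighborhoodComponentsAnalysis.fit`. For context, a minimal sketch of the public API those methods support, written against the standard scikit-learn interface (the toy data is illustrative, not taken from the dataset):

```python
# NCA learns a linear transformation that improves k-nearest-neighbor accuracy.
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis
from sklearn.pipeline import make_pipeline

X, y = load_iris(return_X_y=True)
nca = NeighborhoodComponentsAnalysis(n_components=2, random_state=0)
clf = make_pipeline(nca, KNeighborsClassifier(n_neighbors=3)).fit(X, y)
print(nca.components_.shape)  # (n_components, n_features) learned by fit()
print(clf.score(X, y))
```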
repository: scikit-learn
repo_id: 124
target_module_path: sklearn/kernel_approximation.py
prompt:
def fit(self, X, y=None): """Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Nystroem.fit.txt
full_function:
def fit(self, X, y=None): """Fit estimator to data. Samples a subset of training points, computes kernel on these and computes normalization matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, accept_sparse="csr") rnd = check_random_state(self.random_state) n_samples = X.shape[0] # get basis vectors if self.n_components > n_samples: # XXX should we just bail? n_components = n_samples warnings.warn( "n_components > n_samples. This is not possible.\n" "n_components was set to n_samples, which results" " in inefficient evaluation of the full kernel." ) else: n_components = self.n_components n_components = min(n_samples, n_components) inds = rnd.permutation(n_samples) basis_inds = inds[:n_components] basis = X[basis_inds] basis_kernel = pairwise_kernels( basis, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **self._get_kernel_params(), ) # sqrt of kernel matrix on basis vectors U, S, V = svd(basis_kernel) S = np.maximum(S, 1e-12) self.normalization_ = np.dot(U / np.sqrt(S), V) self.components_ = basis self.component_indices_ = basis_inds self._n_features_out = n_components return self
function_name: Nystroem.fit

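The record above documents `Nystroem.fit`. A minimal sketch of standard usage (illustrative data, standard scikit-learn API):

```python
# Nystroem approximates a kernel map using a random subset of training points.
from sklearn.datasets import make_classification
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import make_pipeline

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
feature_map = Nystroem(kernel="rbf", gamma=0.2, n_components=100, random_state=0)
clf = make_pipeline(feature_map, SGDClassifier(max_iter=1000, random_state=0)).fit(X, y)
print(feature_map.components_.shape)  # (n_components, n_features) basis chosen by fit()
```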
repository: scikit-learn
repo_id: 125
target_module_path: sklearn/covariance/_shrunk_covariance.py
prompt:
def fit(self, X, y=None): """Fit the Oracle Approximating Shrinkage covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OAS.fit.txt
full_function:
def fit(self, X, y=None): """Fit the Oracle Approximating Shrinkage covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) # Not calling the parent object to fit, to avoid computing the # covariance matrix (and potentially the precision) if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance, shrinkage = _oas(X - self.location_, assume_centered=True) self.shrinkage_ = shrinkage self._set_covariance(covariance) return self
function_name: OAS.fit

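The record above documents `OAS.fit`. A minimal usage sketch (synthetic data, standard scikit-learn API):

```python
# OAS fits a shrunk covariance estimate with an analytically chosen shrinkage.
import numpy as np
from sklearn.covariance import OAS

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
oas = OAS().fit(X)
print(oas.shrinkage_)   # shrinkage coefficient estimated from the data
print(oas.covariance_)  # regularized covariance estimate
```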
repository: scikit-learn
repo_id: 126
target_module_path: sklearn/preprocessing/_encoders.py
prompt:
def fit(self, X, y=None): """ Fit OneHotEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. Returns ------- self Fitted encoder. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneHotEncoder.fit.txt
full_function:
def fit(self, X, y=None): """ Fit OneHotEncoder to X. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to determine the categories of each feature. y : None Ignored. This parameter exists only for compatibility with :class:`~sklearn.pipeline.Pipeline`. Returns ------- self Fitted encoder. """ self._fit( X, handle_unknown=self.handle_unknown, ensure_all_finite="allow-nan", ) self._set_drop_idx() self._n_features_outs = self._compute_n_features_outs() return self
function_name: OneHotEncoder.fit

repository: scikit-learn
repo_id: 127
target_module_path: sklearn/preprocessing/_encoders.py
prompt:
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneHotEncoder.get_feature_names_out.txt
full_function:
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Input features. - If `input_features` is `None`, then `feature_names_in_` is used as feature names in. If `feature_names_in_` is not defined, then the following input feature names are generated: `["x0", "x1", ..., "x(n_features_in_ - 1)"]`. - If `input_features` is an array-like, then `input_features` must match `feature_names_in_` if `feature_names_in_` is defined. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self) input_features = _check_feature_names_in(self, input_features) cats = [ self._compute_transformed_categories(i) for i, _ in enumerate(self.categories_) ] name_combiner = self._check_get_feature_name_combiner() feature_names = [] for i in range(len(cats)): names = [name_combiner(input_features[i], t) for t in cats[i]] feature_names.extend(names) return np.array(feature_names, dtype=object)
function_name: OneHotEncoder.get_feature_names_out

repository: scikit-learn
repo_id: 128
target_module_path: sklearn/preprocessing/_encoders.py
prompt:
def transform(self, X): """ Transform X using one-hot encoding. If `sparse_output=True` (default), it returns an instance of :class:`scipy.sparse._csr.csr_matrix` (CSR format). If there are infrequent categories for a feature, set by specifying `max_categories` or `min_frequency`, the infrequent categories are grouped into a single category. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : {ndarray, sparse matrix} of shape \ (n_samples, n_encoded_features) Transformed input. If `sparse_output=True`, a sparse matrix will be returned. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneHotEncoder.transform.txt
full_function:
def transform(self, X): """ Transform X using one-hot encoding. If `sparse_output=True` (default), it returns an instance of :class:`scipy.sparse._csr.csr_matrix` (CSR format). If there are infrequent categories for a feature, set by specifying `max_categories` or `min_frequency`, the infrequent categories are grouped into a single category. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. Returns ------- X_out : {ndarray, sparse matrix} of shape \ (n_samples, n_encoded_features) Transformed input. If `sparse_output=True`, a sparse matrix will be returned. """ check_is_fitted(self) transform_output = _get_output_config("transform", estimator=self)["dense"] if transform_output != "default" and self.sparse_output: capitalize_transform_output = transform_output.capitalize() raise ValueError( f"{capitalize_transform_output} output does not support sparse data." f" Set sparse_output=False to output {transform_output} dataframes or" f" disable {capitalize_transform_output} output via" '` ohe.set_output(transform="default").' ) # validation of X happens in _check_X called by _transform warn_on_unknown = self.drop is not None and self.handle_unknown in { "ignore", "infrequent_if_exist", } X_int, X_mask = self._transform( X, handle_unknown=self.handle_unknown, ensure_all_finite="allow-nan", warn_on_unknown=warn_on_unknown, ) n_samples, n_features = X_int.shape if self._drop_idx_after_grouping is not None: to_drop = self._drop_idx_after_grouping.copy() # We remove all the dropped categories from mask, and decrement all # categories that occur after them to avoid an empty column. keep_cells = X_int != to_drop for i, cats in enumerate(self.categories_): # drop='if_binary' but feature isn't binary if to_drop[i] is None: # set to cardinality to not drop from X_int to_drop[i] = len(cats) to_drop = to_drop.reshape(1, -1) X_int[X_int > to_drop] -= 1 X_mask &= keep_cells mask = X_mask.ravel() feature_indices = np.cumsum([0] + self._n_features_outs) indices = (X_int + feature_indices[:-1]).ravel()[mask] indptr = np.empty(n_samples + 1, dtype=int) indptr[0] = 0 np.sum(X_mask, axis=1, out=indptr[1:], dtype=indptr.dtype) np.cumsum(indptr[1:], out=indptr[1:]) data = np.ones(indptr[-1]) out = sparse.csr_matrix( (data, indices, indptr), shape=(n_samples, feature_indices[-1]), dtype=self.dtype, ) if not self.sparse_output: return out.toarray() else: return out
function_name: OneHotEncoder.transform

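The three records above document `OneHotEncoder.fit`, `get_feature_names_out`, and `transform`. A minimal sketch tying them together (standard scikit-learn usage, not taken from the dataset):

```python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

X = np.array([["red", "S"], ["blue", "M"], ["red", "L"]], dtype=object)
enc = OneHotEncoder(handle_unknown="ignore", sparse_output=False).fit(X)
print(enc.categories_)                               # learned per-feature categories
print(enc.get_feature_names_out(["color", "size"]))  # e.g. "color_blue", "size_L", ...
# Unknown categories become an all-zero block under handle_unknown="ignore".
print(enc.transform([["blue", "S"], ["green", "XL"]]))
```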
repository: scikit-learn
repo_id: 129
target_module_path: sklearn/multiclass.py
prompt:
def decision_function(self, X): """Decision function for the OneVsOneClassifier. The decision values for the samples are computed by adding the normalized sum of pair-wise classification confidence levels to the votes in order to disambiguate between the decision values when the votes for all the classes are equal leading to a tie. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- Y : array-like of shape (n_samples, n_classes) or (n_samples,) Result of calling `decision_function` on the final estimator. .. versionchanged:: 0.19 output shape changed to ``(n_samples,)`` to conform to scikit-learn conventions for binary classification. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsOneClassifier.decision_function.txt
full_function:
def decision_function(self, X): """Decision function for the OneVsOneClassifier. The decision values for the samples are computed by adding the normalized sum of pair-wise classification confidence levels to the votes in order to disambiguate between the decision values when the votes for all the classes are equal leading to a tie. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- Y : array-like of shape (n_samples, n_classes) or (n_samples,) Result of calling `decision_function` on the final estimator. .. versionchanged:: 0.19 output shape changed to ``(n_samples,)`` to conform to scikit-learn conventions for binary classification. """ check_is_fitted(self) X = validate_data( self, X, accept_sparse=True, ensure_all_finite=False, reset=False, ) indices = self.pairwise_indices_ if indices is None: Xs = [X] * len(self.estimators_) else: Xs = [X[:, idx] for idx in indices] predictions = np.vstack( [est.predict(Xi) for est, Xi in zip(self.estimators_, Xs)] ).T confidences = np.vstack( [_predict_binary(est, Xi) for est, Xi in zip(self.estimators_, Xs)] ).T Y = _ovr_decision_function(predictions, confidences, len(self.classes_)) if self.n_classes_ == 2: return Y[:, 1] return Y
function_name: OneVsOneClassifier.decision_function

repository: scikit-learn
repo_id: 130
target_module_path: sklearn/multiclass.py
prompt:
def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The fitted underlying estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsOneClassifier.fit.txt
full_function:
def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The fitted underlying estimator. """ _raise_for_params(fit_params, self, "fit") routed_params = process_routing( self, "fit", **fit_params, ) # We need to validate the data because we do a safe_indexing later. X, y = validate_data( self, X, y, accept_sparse=["csr", "csc"], ensure_all_finite=False ) check_classification_targets(y) self.classes_ = np.unique(y) if len(self.classes_) == 1: raise ValueError( "OneVsOneClassifier can not be fit when only one class is present." ) n_classes = self.classes_.shape[0] estimators_indices = list( zip( *( Parallel(n_jobs=self.n_jobs)( delayed(_fit_ovo_binary)( self.estimator, X, y, self.classes_[i], self.classes_[j], fit_params=routed_params.estimator.fit, ) for i in range(n_classes) for j in range(i + 1, n_classes) ) ) ) ) self.estimators_ = estimators_indices[0] pairwise = self.__sklearn_tags__().input_tags.pairwise self.pairwise_indices_ = estimators_indices[1] if pairwise else None return self
function_name: OneVsOneClassifier.fit

repository: scikit-learn
repo_id: 131
target_module_path: sklearn/multiclass.py
prompt:
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration, where the first call should have an array of all target variables. Parameters ---------- X : {array-like, sparse matrix) of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The partially fitted underlying estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsOneClassifier.partial_fit.txt
full_function:
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration, where the first call should have an array of all target variables. Parameters ---------- X : {array-like, sparse matrix) of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The partially fitted underlying estimator. """ _raise_for_params(partial_fit_params, self, "partial_fit") routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) first_call = _check_partial_fit_first_call(self, classes) if first_call: self.estimators_ = [ clone(self.estimator) for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2) ] if len(np.setdiff1d(y, self.classes_)): raise ValueError( "Mini-batch contains {0} while it must be subset of {1}".format( np.unique(y), self.classes_ ) ) X, y = validate_data( self, X, y, accept_sparse=["csr", "csc"], ensure_all_finite=False, reset=first_call, ) check_classification_targets(y) combinations = itertools.combinations(range(self.n_classes_), 2) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_ovo_binary)( estimator, X, y, self.classes_[i], self.classes_[j], partial_fit_params=routed_params.estimator.partial_fit, ) for estimator, (i, j) in zip(self.estimators_, (combinations)) ) self.pairwise_indices_ = None if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ return self
function_name: OneVsOneClassifier.partial_fit

repository: scikit-learn
repo_id: 132
target_module_path: sklearn/multiclass.py
prompt:
def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsRestClassifier.fit.txt
full_function:
def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of fitted estimator. """ _raise_for_params(fit_params, self, "fit") routed_params = process_routing( self, "fit", **fit_params, ) # A sparse LabelBinarizer, with sparse_output=True, has been shown to # outperform or match a dense label binarizer in all cases and has also # resulted in less or equal memory consumption in the fit_ovr function # overall. self.label_binarizer_ = LabelBinarizer(sparse_output=True) Y = self.label_binarizer_.fit_transform(y) Y = Y.tocsc() self.classes_ = self.label_binarizer_.classes_ columns = (col.toarray().ravel() for col in Y.T) # In cases where individual estimators are very fast to train setting # n_jobs > 1 in can results in slower performance due to the overhead # of spawning threads. See joblib issue #112. self.estimators_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_fit_binary)( self.estimator, X, column, fit_params=routed_params.estimator.fit, classes=[ "not %s" % self.label_binarizer_.classes_[i], self.label_binarizer_.classes_[i], ], ) for i, column in enumerate(columns) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], "feature_names_in_"): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self
function_name: OneVsRestClassifier.fit

repository: scikit-learn
repo_id: 133
target_module_path: sklearn/multiclass.py
prompt:
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iterations. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of partially fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsRestClassifier.partial_fit.txt
full_function:
def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iterations. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of partially fitted estimator. """ _raise_for_params(partial_fit_params, self, "partial_fit") routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) if _check_partial_fit_first_call(self, classes): self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)] # A sparse LabelBinarizer, with sparse_output=True, has been # shown to outperform or match a dense label binarizer in all # cases and has also resulted in less or equal memory consumption # in the fit_ovr function overall. self.label_binarizer_ = LabelBinarizer(sparse_output=True) self.label_binarizer_.fit(self.classes_) if len(np.setdiff1d(y, self.classes_)): raise ValueError( ( "Mini-batch contains {0} while classes " + "must be subset of {1}" ).format(np.unique(y), self.classes_) ) Y = self.label_binarizer_.transform(y) Y = Y.tocsc() columns = (col.toarray().ravel() for col in Y.T) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_binary)( estimator, X, column, partial_fit_params=routed_params.estimator.partial_fit, ) for estimator, column in zip(self.estimators_, columns) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ return self
function_name: OneVsRestClassifier.partial_fit

repository: scikit-learn
repo_id: 134
target_module_path: sklearn/multiclass.py
prompt:
def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Predicted multi-class targets. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsRestClassifier.predict.txt
full_function:
def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Predicted multi-class targets. """ check_is_fitted(self) n_samples = _num_samples(X) if self.label_binarizer_.y_type_ == "multiclass": maxima = np.empty(n_samples, dtype=float) maxima.fill(-np.inf) argmaxima = np.zeros(n_samples, dtype=int) for i, e in enumerate(self.estimators_): pred = _predict_binary(e, X) np.maximum(maxima, pred, out=maxima) argmaxima[maxima == pred] = i return self.classes_[argmaxima] else: thresh = _threshold_for_binary_predict(self.estimators_[0]) indices = array.array("i") indptr = array.array("i", [0]) for e in self.estimators_: indices.extend(np.where(_predict_binary(e, X) > thresh)[0]) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) indicator = sp.csc_matrix( (data, indices, indptr), shape=(n_samples, len(self.estimators_)) ) return self.label_binarizer_.inverse_transform(indicator)
function_name: OneVsRestClassifier.predict

repository: scikit-learn
repo_id: 135
target_module_path: sklearn/multiclass.py
prompt:
def predict_proba(self, X): """Probability estimates. The returned estimates for all classes are ordered by label of classes. Note that in the multilabel case, each sample can have any number of labels. This returns the marginal probability that the given sample has the label in question. For example, it is entirely consistent that two labels both have a 90% probability of applying to a given sample. In the single label multiclass case, the rows of the returned matrix sum to 1. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OneVsRestClassifier.predict_proba.txt
full_function:
def predict_proba(self, X): """Probability estimates. The returned estimates for all classes are ordered by label of classes. Note that in the multilabel case, each sample can have any number of labels. This returns the marginal probability that the given sample has the label in question. For example, it is entirely consistent that two labels both have a 90% probability of applying to a given sample. In the single label multiclass case, the rows of the returned matrix sum to 1. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ check_is_fitted(self) # Y[i, j] gives the probability that sample i has the label j. # In the multi-label case, these are not disjoint. Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T if len(self.estimators_) == 1: # Only one estimator, but we still want to return probabilities # for two classes. Y = np.concatenate(((1 - Y), Y), axis=1) if not self.multilabel_: # Then, probabilities should be normalized to 1. Y /= np.sum(Y, axis=1)[:, np.newaxis] return Y
function_name: OneVsRestClassifier.predict_proba

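The records above cover the `OneVsRestClassifier` methods (`fit`, `partial_fit`, `predict`, `predict_proba`). A minimal sketch of the one-vs-rest workflow they describe (standard scikit-learn usage):

```python
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier

X, y = load_iris(return_X_y=True)
ovr = OneVsRestClassifier(LogisticRegression(max_iter=1000)).fit(X, y)
print(ovr.classes_)                          # one binary estimator per class
print(ovr.predict(X[:3]))
print(ovr.predict_proba(X[:3]).sum(axis=1))  # rows sum to 1 in the single-label case
```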
repository: scikit-learn
repo_id: 136
target_module_path: sklearn/linear_model/_omp.py
prompt:
def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. Returns ------- self : object Returns an instance of self. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OrthogonalMatchingPursuit.fit.txt
full_function:
def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Will be cast to X's dtype if necessary. Returns ------- self : object Returns an instance of self. """ X, y = validate_data(self, X, y, multi_output=True, y_numeric=True) n_features = X.shape[1] X, y, X_offset, y_offset, X_scale, Gram, Xy = _pre_fit( X, y, None, self.precompute, self.fit_intercept, copy=True ) if y.ndim == 1: y = y[:, np.newaxis] if self.n_nonzero_coefs is None and self.tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) elif self.tol is not None: self.n_nonzero_coefs_ = None else: self.n_nonzero_coefs_ = self.n_nonzero_coefs if Gram is False: coef_, self.n_iter_ = orthogonal_mp( X, y, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, precompute=False, copy_X=True, return_n_iter=True, ) else: norms_sq = np.sum(y**2, axis=0) if self.tol is not None else None coef_, self.n_iter_ = orthogonal_mp_gram( Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True, ) self.coef_ = coef_.T self._set_intercept(X_offset, y_offset, X_scale) return self
function_name: OrthogonalMatchingPursuit.fit

repository: scikit-learn
repo_id: 137
target_module_path: sklearn/linear_model/_omp.py
prompt:
def fit(self, X, y, **fit_params): """Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. **fit_params : dict Parameters to pass to the underlying splitter. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns an instance of self. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OrthogonalMatchingPursuitCV.fit.txt
full_function:
def fit(self, X, y, **fit_params): """Fit the model using X, y as training data. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. Will be cast to X's dtype if necessary. **fit_params : dict Parameters to pass to the underlying splitter. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns an instance of self. """ _raise_for_params(fit_params, self, "fit") X, y = validate_data(self, X, y, y_numeric=True, ensure_min_features=2) X = as_float_array(X, copy=False, ensure_all_finite=False) cv = check_cv(self.cv, classifier=False) if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. routed_params = Bunch() routed_params.splitter = Bunch(split={}) max_iter = ( min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) if not self.max_iter else self.max_iter ) cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_omp_path_residues)( X[train], y[train], X[test], y[test], self.copy, self.fit_intercept, max_iter, ) for train, test in cv.split(X, **routed_params.splitter.split) ) min_early_stop = min(fold.shape[0] for fold in cv_paths) mse_folds = np.array( [(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths] ) best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 self.n_nonzero_coefs_ = best_n_nonzero_coefs omp = OrthogonalMatchingPursuit( n_nonzero_coefs=best_n_nonzero_coefs, fit_intercept=self.fit_intercept, ).fit(X, y) self.coef_ = omp.coef_ self.intercept_ = omp.intercept_ self.n_iter_ = omp.n_iter_ return self
function_name: OrthogonalMatchingPursuitCV.fit

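The two records above document `OrthogonalMatchingPursuit.fit` and `OrthogonalMatchingPursuitCV.fit`. A minimal sketch showing the fixed-sparsity and cross-validated variants side by side (illustrative data, standard scikit-learn API):

```python
from sklearn.datasets import make_regression
from sklearn.linear_model import OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV

X, y = make_regression(n_samples=100, n_features=30, n_informative=5, noise=1.0, random_state=0)

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=5).fit(X, y)
print((omp.coef_ != 0).sum())   # number of features kept by the greedy selection

omp_cv = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
print(omp_cv.n_nonzero_coefs_)  # sparsity level chosen by cross-validation
```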
repository: scikit-learn
repo_id: 138
target_module_path: sklearn/multiclass.py
prompt:
def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns a fitted instance of self. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OutputCodeClassifier.fit.txt
full_function:
def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns a fitted instance of self. """ _raise_for_params(fit_params, self, "fit") routed_params = process_routing( self, "fit", **fit_params, ) y = validate_data(self, X="no_validation", y=y) random_state = check_random_state(self.random_state) check_classification_targets(y) self.classes_ = np.unique(y) n_classes = self.classes_.shape[0] if n_classes == 0: raise ValueError( "OutputCodeClassifier can not be fit when no class is present." ) n_estimators = int(n_classes * self.code_size) # FIXME: there are more elaborate methods than generating the codebook # randomly. self.code_book_ = random_state.uniform(size=(n_classes, n_estimators)) self.code_book_[self.code_book_ > 0.5] = 1.0 if hasattr(self.estimator, "decision_function"): self.code_book_[self.code_book_ != 1] = -1.0 else: self.code_book_[self.code_book_ != 1] = 0.0 classes_index = {c: i for i, c in enumerate(self.classes_)} Y = np.array( [self.code_book_[classes_index[y[i]]] for i in range(_num_samples(y))], dtype=int, ) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_binary)( self.estimator, X, Y[:, i], fit_params=routed_params.estimator.fit ) for i in range(Y.shape[1]) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], "feature_names_in_"): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self
function_name: OutputCodeClassifier.fit

repository: scikit-learn
repo_id: 139
target_module_path: sklearn/multiclass.py
prompt:
def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : ndarray of shape (n_samples,) Predicted multi-class targets. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_OutputCodeClassifier.predict.txt
full_function:
def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : ndarray of shape (n_samples,) Predicted multi-class targets. """ check_is_fitted(self) # ArgKmin only accepts C-contiguous array. The aggregated predictions need to be # transposed. We therefore create a F-contiguous array to avoid a copy and have # a C-contiguous array after the transpose operation. Y = np.array( [_predict_binary(e, X) for e in self.estimators_], order="F", dtype=np.float64, ).T pred = pairwise_distances_argmin(Y, self.code_book_, metric="euclidean") return self.classes_[pred]
function_name: OutputCodeClassifier.predict

repository: scikit-learn
repo_id: 140
target_module_path: sklearn/decomposition/_pca.py
prompt:
def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed values. Notes ----- This method returns a Fortran-ordered array. To convert it to a C-ordered array, use 'np.ascontiguousarray'. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_PCA.fit_transform.txt
full_function:
def fit_transform(self, X, y=None): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed values. Notes ----- This method returns a Fortran-ordered array. To convert it to a C-ordered array, use 'np.ascontiguousarray'. """ U, S, _, X, x_is_centered, xp = self._fit(X) if U is not None: U = U[:, : self.n_components_] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= sqrt(X.shape[0] - 1) else: # X_new = X * V = U * S * Vt * V = U * S U *= S[: self.n_components_] return U else: # solver="covariance_eigh" does not compute U at fit time. return self._transform(X, xp, x_is_centered=x_is_centered)
function_name: PCA.fit_transform

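The record above documents `PCA.fit_transform`. A minimal sketch of standard usage:

```python
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

X, _ = load_iris(return_X_y=True)
pca = PCA(n_components=2)
X_new = pca.fit_transform(X)          # fit the model and project X in one call
print(X_new.shape)                    # (150, 2)
print(pca.explained_variance_ratio_)  # variance captured by each component
```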
repository: scikit-learn
repo_id: 141
target_module_path: sklearn/linear_model/_passive_aggressive.py
prompt:
def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. coef_init : ndarray of shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (n_classes,) The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_PassiveAggressiveClassifier.fit.txt
full_function:
def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. coef_init : ndarray of shape (n_classes, n_features) The initial coefficients to warm-start the optimization. intercept_init : ndarray of shape (n_classes,) The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator. """ self._more_validate_params() lr = "pa1" if self.loss == "hinge" else "pa2" return self._fit( X, y, alpha=1.0, C=self.C, loss="hinge", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init, )
function_name: PassiveAggressiveClassifier.fit

repository: scikit-learn
repo_id: 142
target_module_path: sklearn/linear_model/_passive_aggressive.py
prompt:
def partial_fit(self, X, y, classes=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of the training data. y : array-like of shape (n_samples,) Subset of the target values. classes : ndarray of shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. Returns ------- self : object Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_PassiveAggressiveClassifier.partial_fit.txt
full_function:
def partial_fit(self, X, y, classes=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of the training data. y : array-like of shape (n_samples,) Subset of the target values. classes : ndarray of shape (n_classes,) Classes across all calls to partial_fit. Can be obtained by via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that y doesn't need to contain all labels in `classes`. Returns ------- self : object Fitted estimator. """ if not hasattr(self, "classes_"): self._more_validate_params(for_partial_fit=True) if self.class_weight == "balanced": raise ValueError( "class_weight 'balanced' is not supported for " "partial_fit. For 'balanced' weights, use " "`sklearn.utils.compute_class_weight` with " "`class_weight='balanced'`. In place of y you " "can use a large enough subset of the full " "training set target to properly estimate the " "class frequency distributions. Pass the " "resulting weights as the class_weight " "parameter." ) lr = "pa1" if self.loss == "hinge" else "pa2" return self._partial_fit( X, y, alpha=1.0, C=self.C, loss="hinge", learning_rate=lr, max_iter=1, classes=classes, sample_weight=None, coef_init=None, intercept_init=None, )
function_name: PassiveAggressiveClassifier.partial_fit

repository: scikit-learn
repo_id: 143
target_module_path: sklearn/linear_model/_passive_aggressive.py
prompt:
def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : numpy array of shape [n_samples] Target values. coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_PassiveAggressiveRegressor.fit.txt
full_function:
def fit(self, X, y, coef_init=None, intercept_init=None): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : numpy array of shape [n_samples] Target values. coef_init : array, shape = [n_features] The initial coefficients to warm-start the optimization. intercept_init : array, shape = [1] The initial intercept to warm-start the optimization. Returns ------- self : object Fitted estimator. """ self._more_validate_params() lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._fit( X, y, alpha=1.0, C=self.C, loss="epsilon_insensitive", learning_rate=lr, coef_init=coef_init, intercept_init=intercept_init, )
function_name: PassiveAggressiveRegressor.fit

repository: scikit-learn
repo_id: 144
target_module_path: sklearn/linear_model/_passive_aggressive.py
prompt:
def partial_fit(self, X, y): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of training data. y : numpy array of shape [n_samples] Subset of target values. Returns ------- self : object Fitted estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_PassiveAggressiveRegressor.partial_fit.txt
full_function:
def partial_fit(self, X, y): """Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of training data. y : numpy array of shape [n_samples] Subset of target values. Returns ------- self : object Fitted estimator. """ if not hasattr(self, "coef_"): self._more_validate_params(for_partial_fit=True) lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2" return self._partial_fit( X, y, alpha=1.0, C=self.C, loss="epsilon_insensitive", learning_rate=lr, max_iter=1, sample_weight=None, coef_init=None, intercept_init=None, )
function_name: PassiveAggressiveRegressor.partial_fit

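The four records above cover `fit` and `partial_fit` for the passive-aggressive classifier and regressor. A minimal sketch of the incremental `partial_fit` workflow for the classifier variant (standard scikit-learn usage, illustrative data):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", random_state=0)

classes = np.unique(y)                   # required on the first partial_fit call
for start in range(0, len(X), 100):      # feed the data in mini-batches
    clf.partial_fit(X[start:start + 100], y[start:start + 100], classes=classes)
print(clf.score(X, y))
```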
repository: scikit-learn
repo_id: 145
target_module_path: sklearn/feature_extraction/image.py
prompt:
def transform(self, X): """Transform the image samples in `X` into a matrix of patch data. Parameters ---------- X : ndarray of shape (n_samples, image_height, image_width) or \ (n_samples, image_height, image_width, n_channels) Array of images from which to extract patches. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or \ (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the images, where `n_patches` is either `n_samples * max_patches` or the total number of patches that can be extracted. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_PatchExtractor.transform.txt
full_function:
def transform(self, X): """Transform the image samples in `X` into a matrix of patch data. Parameters ---------- X : ndarray of shape (n_samples, image_height, image_width) or \ (n_samples, image_height, image_width, n_channels) Array of images from which to extract patches. For color images, the last dimension specifies the channel: a RGB image would have `n_channels=3`. Returns ------- patches : array of shape (n_patches, patch_height, patch_width) or \ (n_patches, patch_height, patch_width, n_channels) The collection of patches extracted from the images, where `n_patches` is either `n_samples * max_patches` or the total number of patches that can be extracted. """ X = validate_data( self, X=X, ensure_2d=False, allow_nd=True, ensure_min_samples=1, ensure_min_features=1, reset=False, ) random_state = check_random_state(self.random_state) n_imgs, img_height, img_width = X.shape[:3] if self.patch_size is None: patch_size = img_height // 10, img_width // 10 else: if len(self.patch_size) != 2: raise ValueError( "patch_size must be a tuple of two integers. Got" f" {self.patch_size} instead." ) patch_size = self.patch_size n_imgs, img_height, img_width = X.shape[:3] X = np.reshape(X, (n_imgs, img_height, img_width, -1)) n_channels = X.shape[-1] # compute the dimensions of the patches array patch_height, patch_width = patch_size n_patches = _compute_n_patches( img_height, img_width, patch_height, patch_width, self.max_patches ) patches_shape = (n_imgs * n_patches,) + patch_size if n_channels > 1: patches_shape += (n_channels,) # extract the patches patches = np.empty(patches_shape) for ii, image in enumerate(X): patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d( image, patch_size, max_patches=self.max_patches, random_state=random_state, ) return patches
function_name: PatchExtractor.transform

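The record above documents `PatchExtractor.transform`. A minimal sketch of standard usage; the sample images ship with scikit-learn, and the patch count depends on `max_patches`:

```python
import numpy as np
from sklearn.datasets import load_sample_images
from sklearn.feature_extraction.image import PatchExtractor

images = np.stack(load_sample_images().images)   # (2, height, width, 3) RGB images
pe = PatchExtractor(patch_size=(16, 16), max_patches=8, random_state=0)
patches = pe.fit(images).transform(images)
print(patches.shape)  # (2 * 8, 16, 16, 3): max_patches patches per image
```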
repository: scikit-learn
repo_id: 146
target_module_path: sklearn/pipeline.py
prompt:
def decision_function(self, X, **params): """Transform the data, and apply `decision_function` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `decision_function` method. Only valid if the final estimator implements `decision_function`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of string -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_score : ndarray of shape (n_samples, n_classes) Result of calling `decision_function` on the final estimator. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Pipeline.decision_function.txt
full_function:
def decision_function(self, X, **params): """Transform the data, and apply `decision_function` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `decision_function` method. Only valid if the final estimator implements `decision_function`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of string -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_score : ndarray of shape (n_samples, n_classes) Result of calling `decision_function` on the final estimator. """ _raise_for_params(params, self, "decision_function") # not branching here since params is only available if # enable_metadata_routing=True routed_params = process_routing(self, "decision_function", **params) Xt = X for _, name, transform in self._iter(with_final=False): Xt = transform.transform( Xt, **routed_params.get(name, {}).get("transform", {}) ) return self.steps[-1][1].decision_function( Xt, **routed_params.get(self.steps[-1][0], {}).get("decision_function", {}) )
function_name: Pipeline.decision_function

repository: scikit-learn
repo_id: 147
target_module_path: sklearn/pipeline.py
prompt:
def fit(self, X, y=None, **params): """Fit the model. Fit all the transformers one after the other and sequentially transform the data. Finally, fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Pipeline with fitted steps. """
relavent_test_path: /usr/src/app/target_test_cases/failed_tests_Pipeline.fit.txt
full_function:
def fit(self, X, y=None, **params): """Fit the model. Fit all the transformers one after the other and sequentially transform the data. Finally, fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Pipeline with fitted steps. """ routed_params = self._check_method_params(method="fit", props=params) Xt = self._fit(X, y, routed_params) with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): if self._final_estimator != "passthrough": last_step_params = routed_params[self.steps[-1][0]] self._final_estimator.fit(Xt, y, **last_step_params["fit"]) return self
Pipeline.fit
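A hedged sketch of the default (non-routed) fit-parameter convention described in the `Pipeline.fit` docstring above, where parameter ``p`` of step ``s`` is passed as ``s__p``; the data and the `clf` step name are illustrative assumptions.

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = (X[:, 0] > 0).astype(int)
pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression())])
# "clf__sample_weight" is forwarded to the fit method of the step named "clf".
pipe.fit(X, y, clf__sample_weight=np.ones(50))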
scikit-learn
148
sklearn/pipeline.py
def fit_transform(self, X, y=None, **params): """Fit the model and transform with the final estimator. Fit all the transformers one after the other and sequentially transform the data. Only valid if the final estimator either implements `fit_transform` or `fit` and `transform`. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : ndarray of shape (n_samples, n_transformed_features) Transformed samples. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.fit_transform.txt
def fit_transform(self, X, y=None, **params): """Fit the model and transform with the final estimator. Fit all the transformers one after the other and sequentially transform the data. Only valid if the final estimator either implements `fit_transform` or `fit` and `transform`. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : ndarray of shape (n_samples, n_transformed_features) Transformed samples. """ routed_params = self._check_method_params(method="fit_transform", props=params) Xt = self._fit(X, y, routed_params) last_step = self._final_estimator with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): if last_step == "passthrough": return Xt last_step_params = routed_params[self.steps[-1][0]] if hasattr(last_step, "fit_transform"): return last_step.fit_transform( Xt, y, **last_step_params["fit_transform"] ) else: return last_step.fit(Xt, y, **last_step_params["fit"]).transform( Xt, **last_step_params["transform"] )
Pipeline.fit_transform
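A hedged sketch of `Pipeline.fit_transform` with a transformer as the final step; the iris data and the PCA step are illustrative choices, not part of the record above.

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, _ = load_iris(return_X_y=True)
pipe = Pipeline([("scale", StandardScaler()), ("pca", PCA(n_components=2))])
Xt = pipe.fit_transform(X)  # final step is a transformer, so its output is returned
print(Xt.shape)             # (150, 2)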
scikit-learn
149
sklearn/pipeline.py
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Transform input features using the pipeline. Parameters ---------- input_features : array-like of str or None, default=None Input features. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.get_feature_names_out.txt
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Transform input features using the pipeline. Parameters ---------- input_features : array-like of str or None, default=None Input features. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ feature_names_out = input_features for _, name, transform in self._iter(): if not hasattr(transform, "get_feature_names_out"): raise AttributeError( "Estimator {} does not provide get_feature_names_out. " "Did you mean to call pipeline[:-1].get_feature_names_out" "()?".format(name) ) feature_names_out = transform.get_feature_names_out(feature_names_out) return feature_names_out
Pipeline.get_feature_names_out
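A hedged sketch of `Pipeline.get_feature_names_out`, which chains each step's own `get_feature_names_out`; the two-column toy input and the feature names "a" and "b" are illustrative assumptions.

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler

pipe = Pipeline([("scale", StandardScaler()), ("poly", PolynomialFeatures(degree=2))])
pipe.fit([[1.0, 2.0], [3.0, 4.0]])
# Names are propagated step by step: the scaler keeps them, PolynomialFeatures expands them.
print(pipe.get_feature_names_out(["a", "b"]))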
scikit-learn
150
sklearn/pipeline.py
def inverse_transform(self, X=None, *, Xt=None, **params): """Apply `inverse_transform` for each step in a reverse order. All estimators in the pipeline must support `inverse_transform`. Parameters ---------- X : array-like of shape (n_samples, n_transformed_features) Data samples, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Must fulfill input requirements of last step of pipeline's ``inverse_transform`` method. Xt : array-like of shape (n_samples, n_transformed_features) Data samples, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Must fulfill input requirements of last step of pipeline's ``inverse_transform`` method. .. deprecated:: 1.5 `Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : ndarray of shape (n_samples, n_features) Inverse transformed data, that is, data in the original feature space. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.inverse_transform.txt
def inverse_transform(self, X=None, *, Xt=None, **params): """Apply `inverse_transform` for each step in a reverse order. All estimators in the pipeline must support `inverse_transform`. Parameters ---------- X : array-like of shape (n_samples, n_transformed_features) Data samples, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Must fulfill input requirements of last step of pipeline's ``inverse_transform`` method. Xt : array-like of shape (n_samples, n_transformed_features) Data samples, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Must fulfill input requirements of last step of pipeline's ``inverse_transform`` method. .. deprecated:: 1.5 `Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : ndarray of shape (n_samples, n_features) Inverse transformed data, that is, data in the original feature space. """ _raise_for_params(params, self, "inverse_transform") X = _deprecate_Xt_in_inverse_transform(X, Xt) # we don't have to branch here, since params is only non-empty if # enable_metadata_routing=True. routed_params = process_routing(self, "inverse_transform", **params) reverse_iter = reversed(list(self._iter())) for _, name, transform in reverse_iter: X = transform.inverse_transform(X, **routed_params[name].inverse_transform) return X
Pipeline.inverse_transform
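A hedged sketch of `Pipeline.inverse_transform`; it only works because both illustrative steps (a StandardScaler and a full-rank PCA) implement `inverse_transform`.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(0)
X = rng.randn(20, 3)
pipe = Pipeline([("scale", StandardScaler()), ("pca", PCA(n_components=3))])
Xt = pipe.fit_transform(X)
X_back = pipe.inverse_transform(Xt)  # steps are inverted in reverse order
print(np.allclose(X, X_back))        # True up to floating-point error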
scikit-learn
151
sklearn/pipeline.py
def predict(self, X, **params): """Transform the data, and apply `predict` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict` method. Only valid if the final estimator implements `predict`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the ``predict`` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Note that while this may be used to return uncertainties from some models with ``return_std`` or ``return_cov``, uncertainties that are generated by the transformations in the pipeline are not propagated to the final estimator. Returns ------- y_pred : ndarray Result of calling `predict` on the final estimator. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.predict.txt
def predict(self, X, **params): """Transform the data, and apply `predict` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict` method. Only valid if the final estimator implements `predict`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the ``predict`` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Note that while this may be used to return uncertainties from some models with ``return_std`` or ``return_cov``, uncertainties that are generated by the transformations in the pipeline are not propagated to the final estimator. Returns ------- y_pred : ndarray Result of calling `predict` on the final estimator. """ Xt = X if not _routing_enabled(): for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt) return self.steps[-1][1].predict(Xt, **params) # metadata routing enabled routed_params = process_routing(self, "predict", **params) for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt, **routed_params[name].transform) return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict)
Pipeline.predict
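A hedged sketch of `Pipeline.predict`; the synthetic classification data and the `make_pipeline` helper are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=80, n_features=4, random_state=0)
pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
print(pipe.predict(X[:5]))  # transformers are applied first, then the final estimator predicts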
scikit-learn
152
sklearn/pipeline.py
def predict_log_proba(self, X, **params): """Transform the data, and apply `predict_log_proba` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict_log_proba` method. Only valid if the final estimator implements `predict_log_proba`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the `predict_log_proba` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_log_proba : ndarray of shape (n_samples, n_classes) Result of calling `predict_log_proba` on the final estimator. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.predict_log_proba.txt
def predict_log_proba(self, X, **params): """Transform the data, and apply `predict_log_proba` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict_log_proba` method. Only valid if the final estimator implements `predict_log_proba`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the `predict_log_proba` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_log_proba : ndarray of shape (n_samples, n_classes) Result of calling `predict_log_proba` on the final estimator. """ Xt = X if not _routing_enabled(): for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt) return self.steps[-1][1].predict_log_proba(Xt, **params) # metadata routing enabled routed_params = process_routing(self, "predict_log_proba", **params) for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt, **routed_params[name].transform) return self.steps[-1][1].predict_log_proba( Xt, **routed_params[self.steps[-1][0]].predict_log_proba )
Pipeline.predict_log_proba
scikit-learn
153
sklearn/pipeline.py
def predict_proba(self, X, **params): """Transform the data, and apply `predict_proba` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict_proba` method. Only valid if the final estimator implements `predict_proba`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the `predict_proba` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_proba : ndarray of shape (n_samples, n_classes) Result of calling `predict_proba` on the final estimator. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.predict_proba.txt
def predict_proba(self, X, **params): """Transform the data, and apply `predict_proba` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict_proba` method. Only valid if the final estimator implements `predict_proba`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the `predict_proba` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_proba : ndarray of shape (n_samples, n_classes) Result of calling `predict_proba` on the final estimator. """ Xt = X if not _routing_enabled(): for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt) return self.steps[-1][1].predict_proba(Xt, **params) # metadata routing enabled routed_params = process_routing(self, "predict_proba", **params) for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt, **routed_params[name].transform) return self.steps[-1][1].predict_proba( Xt, **routed_params[self.steps[-1][0]].predict_proba )
Pipeline.predict_proba
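A hedged sketch covering this `predict_proba` record and the `predict_log_proba` record above it; the data and estimators are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=80, n_features=4, random_state=0)
pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
proba = pipe.predict_proba(X[:3])          # shape (3, n_classes), rows sum to 1
log_proba = pipe.predict_log_proba(X[:3])  # elementwise log of the probabilities
print(proba.shape, log_proba.shape)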
scikit-learn
154
sklearn/pipeline.py
def score(self, X, y=None, sample_weight=None, **params): """Transform the data, and apply `score` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `score` method. Only valid if the final estimator implements `score`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as ``sample_weight`` keyword argument to the ``score`` method of the final estimator. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Result of calling `score` on the final estimator. """
/usr/src/app/target_test_cases/failed_tests_Pipeline.score.txt
def score(self, X, y=None, sample_weight=None, **params): """Transform the data, and apply `score` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `score` method. Only valid if the final estimator implements `score`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as ``sample_weight`` keyword argument to the ``score`` method of the final estimator. **params : dict of str -> object Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Result of calling `score` on the final estimator. """ Xt = X if not _routing_enabled(): for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt) score_params = {} if sample_weight is not None: score_params["sample_weight"] = sample_weight return self.steps[-1][1].score(Xt, y, **score_params) # metadata routing is enabled. routed_params = process_routing( self, "score", sample_weight=sample_weight, **params ) Xt = X for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt, **routed_params[name].transform) return self.steps[-1][1].score(Xt, y, **routed_params[self.steps[-1][0]].score)
Pipeline.score
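A hedged sketch of `Pipeline.score` showing the optional `sample_weight` forwarded to the final estimator's `score`; the uniform weights are used only for illustration.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=80, n_features=4, random_state=0)
pipe = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
print(pipe.score(X, y))                             # plain accuracy
print(pipe.score(X, y, sample_weight=np.ones(80)))  # weighted accuracy (identical here)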
scikit-learn
155
sklearn/kernel_approximation.py
def transform(self, X): """Generate the feature map approximation for X. Parameters ---------- X : {array-like}, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_PolynomialCountSketch.transform.txt
def transform(self, X): """Generate the feature map approximation for X. Parameters ---------- X : {array-like}, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse="csc", reset=False) X_gamma = np.sqrt(self.gamma) * X if sp.issparse(X_gamma) and self.coef0 != 0: X_gamma = sp.hstack( [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))], format="csc", ) elif not sp.issparse(X_gamma) and self.coef0 != 0: X_gamma = np.hstack( [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))] ) if X_gamma.shape[1] != self.indexHash_.shape[1]: raise ValueError( "Number of features of test samples does not" " match that of training samples." ) count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components)) if sp.issparse(X_gamma): for j in range(X_gamma.shape[1]): for d in range(self.degree): iHashIndex = self.indexHash_[d, j] iHashBit = self.bitHash_[d, j] count_sketches[:, d, iHashIndex] += ( (iHashBit * X_gamma[:, [j]]).toarray().ravel() ) else: for j in range(X_gamma.shape[1]): for d in range(self.degree): iHashIndex = self.indexHash_[d, j] iHashBit = self.bitHash_[d, j] count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j] # For each same, compute a count sketch of phi(x) using the polynomial # multiplication (via FFT) of p count sketches of x. count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True) count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1) data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True)) return data_sketch
PolynomialCountSketch.transform
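A hedged sketch of `PolynomialCountSketch` approximating a degree-2 polynomial kernel through its sketched feature map; the data, `n_components=500`, and the comparison against the exact kernel are illustrative assumptions.

import numpy as np
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.metrics.pairwise import polynomial_kernel

rng = np.random.RandomState(0)
X = rng.randn(30, 5)
ps = PolynomialCountSketch(degree=2, gamma=1.0, coef0=0, n_components=500, random_state=0)
Z = ps.fit_transform(X)                                     # (30, 500) sketched features
approx = Z @ Z.T                                            # inner products approximate the kernel
exact = polynomial_kernel(X, degree=2, gamma=1.0, coef0=0)
print(np.abs(approx - exact).mean())                        # average approximation error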
scikit-learn
156
sklearn/preprocessing/_polynomial.py
def fit(self, X, y=None): """ Compute number of output features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted transformer. """
/usr/src/app/target_test_cases/failed_tests_PolynomialFeatures.fit.txt
def fit(self, X, y=None): """ Compute number of output features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted transformer. """ _, n_features = validate_data(self, X, accept_sparse=True).shape if isinstance(self.degree, Integral): if self.degree == 0 and not self.include_bias: raise ValueError( "Setting degree to zero and include_bias to False would result in" " an empty output array." ) self._min_degree = 0 self._max_degree = self.degree elif ( isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2 ): self._min_degree, self._max_degree = self.degree if not ( isinstance(self._min_degree, Integral) and isinstance(self._max_degree, Integral) and self._min_degree >= 0 and self._min_degree <= self._max_degree ): raise ValueError( "degree=(min_degree, max_degree) must " "be non-negative integers that fulfil " "min_degree <= max_degree, got " f"{self.degree}." ) elif self._max_degree == 0 and not self.include_bias: raise ValueError( "Setting both min_degree and max_degree to zero and include_bias to" " False would result in an empty output array." ) else: raise ValueError( "degree must be a non-negative int or tuple " "(min_degree, max_degree), got " f"{self.degree}." ) self.n_output_features_ = self._num_combinations( n_features=n_features, min_degree=self._min_degree, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) if self.n_output_features_ > np.iinfo(np.intp).max: msg = ( "The output that would result from the current configuration would" f" have {self.n_output_features_} features which is too large to be" f" indexed by {np.intp().dtype.name}. Please change some or all of the" " following:\n- The number of features in the input, currently" f" {n_features=}\n- The range of degrees to calculate, currently" f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only" f" interaction terms, currently {self.interaction_only}\n- Whether to" f" include a bias term, currently {self.include_bias}." ) if ( np.intp == np.int32 and self.n_output_features_ <= np.iinfo(np.int64).max ): # pragma: nocover msg += ( "\nNote that the current Python runtime has a limited 32 bit " "address space and that this configuration would have been " "admissible if run on a 64 bit Python runtime." ) raise ValueError(msg) # We also record the number of output features for # _max_degree = 0 self._n_out_full = self._num_combinations( n_features=n_features, min_degree=0, max_degree=self._max_degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) return self
PolynomialFeatures.fit
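A hedged sketch of the `(min_degree, max_degree)` tuple form of `degree` validated by the `PolynomialFeatures.fit` record above; the toy input and feature names are illustrative.

from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=(2, 3), include_bias=False)
poly.fit([[1.0, 2.0]])
print(poly.n_output_features_)                # monomials of total degree 2 and 3 only
print(poly.get_feature_names_out(["a", "b"]))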
scikit-learn
157
sklearn/discriminant_analysis.py
def fit(self, X, y): """Fit the model according to the given training data and parameters. .. versionchanged:: 0.19 ``store_covariances`` has been moved to main constructor as ``store_covariance``. .. versionchanged:: 0.19 ``tol`` has been moved to main constructor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values (integers). Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_QuadraticDiscriminantAnalysis.fit.txt
def fit(self, X, y): """Fit the model according to the given training data and parameters. .. versionchanged:: 0.19 ``store_covariances`` has been moved to main constructor as ``store_covariance``. .. versionchanged:: 0.19 ``tol`` has been moved to main constructor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values (integers). Returns ------- self : object Fitted estimator. """ X, y = validate_data(self, X, y) check_classification_targets(y) self.classes_, y = np.unique(y, return_inverse=True) n_samples, n_features = X.shape n_classes = len(self.classes_) if n_classes < 2: raise ValueError( "The number of classes has to be greater than one; got %d class" % (n_classes) ) if self.priors is None: self.priors_ = np.bincount(y) / float(n_samples) else: self.priors_ = np.array(self.priors) cov = None store_covariance = self.store_covariance if store_covariance: cov = [] means = [] scalings = [] rotations = [] for ind in range(n_classes): Xg = X[y == ind, :] meang = Xg.mean(0) means.append(meang) if len(Xg) == 1: raise ValueError( "y has only 1 sample in class %s, covariance is ill defined." % str(self.classes_[ind]) ) Xgc = Xg - meang # Xgc = U * S * V.T _, S, Vt = np.linalg.svd(Xgc, full_matrices=False) S2 = (S**2) / (len(Xg) - 1) S2 = ((1 - self.reg_param) * S2) + self.reg_param rank = np.sum(S2 > self.tol) if rank < n_features: warnings.warn( f"The covariance matrix of class {ind} is not full rank. " "Increasing the value of parameter `reg_param` might help" " reducing the collinearity.", linalg.LinAlgWarning, ) if self.store_covariance or store_covariance: # cov = V * (S^2 / (n-1)) * V.T cov.append(np.dot(S2 * Vt.T, Vt)) scalings.append(S2) rotations.append(Vt.T) if self.store_covariance or store_covariance: self.covariance_ = cov self.means_ = np.asarray(means) self.scalings_ = scalings self.rotations_ = rotations return self
QuadraticDiscriminantAnalysis.fit
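A hedged sketch of `QuadraticDiscriminantAnalysis.fit`; the synthetic data, `reg_param=0.1`, and `store_covariance=True` are illustrative settings.

from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

X, y = make_classification(n_samples=120, n_features=5, random_state=0)
qda = QuadraticDiscriminantAnalysis(store_covariance=True, reg_param=0.1)
qda.fit(X, y)
print(qda.means_.shape)      # (n_classes, n_features)
print(len(qda.covariance_))  # one per-class covariance matrix when store_covariance=True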
scikit-learn
158
sklearn/linear_model/_quantile.py
def fit(self, X, y, sample_weight=None): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- self : object Returns self. """
/usr/src/app/target_test_cases/failed_tests_QuantileRegressor.fit.txt
def fit(self, X, y, sample_weight=None): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- self : object Returns self. """ X, y = validate_data( self, X, y, accept_sparse=["csc", "csr", "coo"], y_numeric=True, multi_output=False, ) sample_weight = _check_sample_weight(sample_weight, X) n_features = X.shape[1] n_params = n_features if self.fit_intercept: n_params += 1 # Note that centering y and X with _preprocess_data does not work # for quantile regression. # The objective is defined as 1/n * sum(pinball loss) + alpha * L1. # So we rescale the penalty term, which is equivalent. alpha = np.sum(sample_weight) * self.alpha if self.solver == "interior-point" and sp_version >= parse_version("1.11.0"): raise ValueError( f"Solver {self.solver} is not anymore available in SciPy >= 1.11.0." ) if sparse.issparse(X) and self.solver not in ["highs", "highs-ds", "highs-ipm"]: raise ValueError( f"Solver {self.solver} does not support sparse X. " "Use solver 'highs' for example." ) # make default solver more stable if self.solver_options is None and self.solver == "interior-point": solver_options = {"lstsq": True} else: solver_options = self.solver_options # After rescaling alpha, the minimization problem is # min sum(pinball loss) + alpha * L1 # Use linear programming formulation of quantile regression # min_x c x # A_eq x = b_eq # 0 <= x # x = (s0, s, t0, t, u, v) = slack variables >= 0 # intercept = s0 - t0 # coef = s - t # c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n) # residual = y - X@coef - intercept = u - v # A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n)) # b_eq = y # p = n_features # n = n_samples # 1_n = vector of length n with entries equal one # see https://stats.stackexchange.com/questions/384909/ # # Filtering out zero sample weights from the beginning makes life # easier for the linprog solver. indices = np.nonzero(sample_weight)[0] n_indices = len(indices) # use n_mask instead of n_samples if n_indices < len(sample_weight): sample_weight = sample_weight[indices] X = _safe_indexing(X, indices) y = _safe_indexing(y, indices) c = np.concatenate( [ np.full(2 * n_params, fill_value=alpha), sample_weight * self.quantile, sample_weight * (1 - self.quantile), ] ) if self.fit_intercept: # do not penalize the intercept c[0] = 0 c[n_params] = 0 if self.solver in ["highs", "highs-ds", "highs-ipm"]: # Note that highs methods always use a sparse CSC memory layout internally, # even for optimization problems parametrized using dense numpy arrays. # Therefore, we work with CSC matrices as early as possible to limit # unnecessary repeated memory copies. eye = sparse.eye(n_indices, dtype=X.dtype, format="csc") if self.fit_intercept: ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype)) A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc") else: A_eq = sparse.hstack([X, -X, eye, -eye], format="csc") else: eye = np.eye(n_indices) if self.fit_intercept: ones = np.ones((n_indices, 1)) A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1) else: A_eq = np.concatenate([X, -X, eye, -eye], axis=1) b_eq = y result = linprog( c=c, A_eq=A_eq, b_eq=b_eq, method=self.solver, options=solver_options, ) solution = result.x if not result.success: failure = { 1: "Iteration limit reached.", 2: "Problem appears to be infeasible.", 3: "Problem appears to be unbounded.", 4: "Numerical difficulties encountered.", } warnings.warn( "Linear programming for QuantileRegressor did not succeed.\n" f"Status is {result.status}: " + failure.setdefault(result.status, "unknown reason") + "\n" + "Result message of linprog:\n" + result.message, ConvergenceWarning, ) # positive slack - negative slack # solution is an array with (params_pos, params_neg, u, v) params = solution[:n_params] - solution[n_params : 2 * n_params] self.n_iter_ = result.nit if self.fit_intercept: self.coef_ = params[1:] self.intercept_ = params[0] else: self.coef_ = params self.intercept_ = 0.0 return self
QuantileRegressor.fit
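A hedged sketch of `QuantileRegressor.fit` solving median regression with the "highs" linear-programming backend (assumed available via SciPy); the data and true coefficients are synthetic, illustrative choices.

import numpy as np
from sklearn.linear_model import QuantileRegressor

rng = np.random.RandomState(0)
X = rng.randn(100, 2)
y = X @ np.array([1.0, -2.0]) + rng.standard_normal(100)
reg = QuantileRegressor(quantile=0.5, alpha=0.0, solver="highs")
reg.fit(X, y)
print(reg.coef_, reg.intercept_)  # roughly [1, -2] and 0 at this noise level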
scikit-learn
159
sklearn/linear_model/_ransac.py
def fit(self, X, y, *, sample_weight=None, **fit_params): """Fit estimator using RANSAC algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample raises error if sample_weight is passed and estimator fit method does not support it. .. versionadded:: 0.18 **fit_params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted `RANSACRegressor` estimator. Raises ------ ValueError If no valid consensus set could be found. This occurs if `is_data_valid` and `is_model_valid` return False for all `max_trials` randomly chosen sub-samples. """
/usr/src/app/target_test_cases/failed_tests_RANSACRegressor.fit.txt
def fit(self, X, y, *, sample_weight=None, **fit_params): """Fit estimator using RANSAC algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. sample_weight : array-like of shape (n_samples,), default=None Individual weights for each sample raises error if sample_weight is passed and estimator fit method does not support it. .. versionadded:: 0.18 **fit_params : dict Parameters routed to the `fit` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted `RANSACRegressor` estimator. Raises ------ ValueError If no valid consensus set could be found. This occurs if `is_data_valid` and `is_model_valid` return False for all `max_trials` randomly chosen sub-samples. """ # Need to validate separately here. We can't pass multi_output=True # because that would allow y to be csr. Delay expensive finiteness # check to the estimator's own input validation. _raise_for_params(fit_params, self, "fit") check_X_params = dict(accept_sparse="csr", ensure_all_finite=False) check_y_params = dict(ensure_2d=False) X, y = validate_data( self, X, y, validate_separately=(check_X_params, check_y_params) ) check_consistent_length(X, y) if self.estimator is not None: estimator = clone(self.estimator) else: estimator = LinearRegression() if self.min_samples is None: if not isinstance(estimator, LinearRegression): raise ValueError( "`min_samples` needs to be explicitly set when estimator " "is not a LinearRegression." ) min_samples = X.shape[1] + 1 elif 0 < self.min_samples < 1: min_samples = np.ceil(self.min_samples * X.shape[0]) elif self.min_samples >= 1: min_samples = self.min_samples if min_samples > X.shape[0]: raise ValueError( "`min_samples` may not be larger than number " "of samples: n_samples = %d." % (X.shape[0]) ) if self.residual_threshold is None: # MAD (median absolute deviation) residual_threshold = np.median(np.abs(y - np.median(y))) else: residual_threshold = self.residual_threshold if self.loss == "absolute_error": if y.ndim == 1: loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred) else: loss_function = lambda y_true, y_pred: np.sum( np.abs(y_true - y_pred), axis=1 ) elif self.loss == "squared_error": if y.ndim == 1: loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2 else: loss_function = lambda y_true, y_pred: np.sum( (y_true - y_pred) ** 2, axis=1 ) elif callable(self.loss): loss_function = self.loss random_state = check_random_state(self.random_state) try: # Not all estimator accept a random_state estimator.set_params(random_state=random_state) except ValueError: pass estimator_fit_has_sample_weight = has_fit_parameter(estimator, "sample_weight") estimator_name = type(estimator).__name__ if sample_weight is not None and not estimator_fit_has_sample_weight: raise ValueError( "%s does not support sample_weight. Sample" " weights are only used for the calibration" " itself." % estimator_name ) if sample_weight is not None: fit_params["sample_weight"] = sample_weight if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) else: routed_params = Bunch() routed_params.estimator = Bunch(fit={}, predict={}, score={}) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) routed_params.estimator.fit = {"sample_weight": sample_weight} n_inliers_best = 1 score_best = -np.inf inlier_mask_best = None X_inlier_best = None y_inlier_best = None inlier_best_idxs_subset = None self.n_skips_no_inliers_ = 0 self.n_skips_invalid_data_ = 0 self.n_skips_invalid_model_ = 0 # number of data samples n_samples = X.shape[0] sample_idxs = np.arange(n_samples) self.n_trials_ = 0 max_trials = self.max_trials while self.n_trials_ < max_trials: self.n_trials_ += 1 if ( self.n_skips_no_inliers_ + self.n_skips_invalid_data_ + self.n_skips_invalid_model_ ) > self.max_skips: break # choose random sample set subset_idxs = sample_without_replacement( n_samples, min_samples, random_state=random_state ) X_subset = X[subset_idxs] y_subset = y[subset_idxs] # check if random sample set is valid if self.is_data_valid is not None and not self.is_data_valid( X_subset, y_subset ): self.n_skips_invalid_data_ += 1 continue # cut `fit_params` down to `subset_idxs` fit_params_subset = _check_method_params( X, params=routed_params.estimator.fit, indices=subset_idxs ) # fit model for current random sample set estimator.fit(X_subset, y_subset, **fit_params_subset) # check if estimated model is valid if self.is_model_valid is not None and not self.is_model_valid( estimator, X_subset, y_subset ): self.n_skips_invalid_model_ += 1 continue # residuals of all data for current random sample model y_pred = estimator.predict(X) residuals_subset = loss_function(y, y_pred) # classify data into inliers and outliers inlier_mask_subset = residuals_subset <= residual_threshold n_inliers_subset = np.sum(inlier_mask_subset) # less inliers -> skip current random sample if n_inliers_subset < n_inliers_best: self.n_skips_no_inliers_ += 1 continue # extract inlier data set inlier_idxs_subset = sample_idxs[inlier_mask_subset] X_inlier_subset = X[inlier_idxs_subset] y_inlier_subset = y[inlier_idxs_subset] # cut `fit_params` down to `inlier_idxs_subset` score_params_inlier_subset = _check_method_params( X, params=routed_params.estimator.score, indices=inlier_idxs_subset ) # score of inlier data set score_subset = estimator.score( X_inlier_subset, y_inlier_subset, **score_params_inlier_subset, ) # same number of inliers but worse score -> skip current random # sample if n_inliers_subset == n_inliers_best and score_subset < score_best: continue # save current random sample as best sample n_inliers_best = n_inliers_subset score_best = score_subset inlier_mask_best = inlier_mask_subset X_inlier_best = X_inlier_subset y_inlier_best = y_inlier_subset inlier_best_idxs_subset = inlier_idxs_subset max_trials = min( max_trials, _dynamic_max_trials( n_inliers_best, n_samples, min_samples, self.stop_probability ), ) # break if sufficient number of inliers or score is reached if n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score: break # if none of the iterations met the required criteria if inlier_mask_best is None: if ( self.n_skips_no_inliers_ + self.n_skips_invalid_data_ + self.n_skips_invalid_model_ ) > self.max_skips: raise ValueError( "RANSAC skipped more iterations than `max_skips` without" " finding a valid consensus set. Iterations were skipped" " because each randomly chosen sub-sample failed the" " passing criteria. See estimator attributes for" " diagnostics (n_skips*)." ) else: raise ValueError( "RANSAC could not find a valid consensus set. All" " `max_trials` iterations were skipped because each" " randomly chosen sub-sample failed the passing criteria." " See estimator attributes for diagnostics (n_skips*)." ) else: if ( self.n_skips_no_inliers_ + self.n_skips_invalid_data_ + self.n_skips_invalid_model_ ) > self.max_skips: warnings.warn( ( "RANSAC found a valid consensus set but exited" " early due to skipping more iterations than" " `max_skips`. See estimator attributes for" " diagnostics (n_skips*)." ), ConvergenceWarning, ) # estimate final model using all inliers fit_params_best_idxs_subset = _check_method_params( X, params=routed_params.estimator.fit, indices=inlier_best_idxs_subset ) estimator.fit(X_inlier_best, y_inlier_best, **fit_params_best_idxs_subset) self.estimator_ = estimator self.inlier_mask_ = inlier_mask_best return self
RANSACRegressor.fit
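A hedged sketch of `RANSACRegressor.fit` on a synthetic line with injected outliers; the corruption pattern and the default `LinearRegression` base estimator are illustrative assumptions.

import numpy as np
from sklearn.linear_model import RANSACRegressor

rng = np.random.RandomState(0)
X = np.arange(100, dtype=float).reshape(-1, 1)
y = 3.0 * X.ravel() + rng.standard_normal(100)
y[::10] += 200.0                   # corrupt every tenth target
ransac = RANSACRegressor(random_state=0).fit(X, y)
print(ransac.estimator_.coef_)     # close to the true slope of 3
print(ransac.inlier_mask_.sum())   # number of samples kept as inliers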
scikit-learn
160
sklearn/linear_model/_ransac.py
def predict(self, X, **params): """Predict using the estimated model. This is a wrapper for `estimator_.predict(X)`. Parameters ---------- X : {array-like or sparse matrix} of shape (n_samples, n_features) Input data. **params : dict Parameters routed to the `predict` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values. """
/usr/src/app/target_test_cases/failed_tests_RANSACRegressor.predict.txt
def predict(self, X, **params): """Predict using the estimated model. This is a wrapper for `estimator_.predict(X)`. Parameters ---------- X : {array-like or sparse matrix} of shape (n_samples, n_features) Input data. **params : dict Parameters routed to the `predict` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values. """ check_is_fitted(self) X = validate_data( self, X, ensure_all_finite=False, accept_sparse=True, reset=False, ) _raise_for_params(params, self, "predict") if _routing_enabled(): predict_params = process_routing(self, "predict", **params).estimator[ "predict" ] else: predict_params = {} return self.estimator_.predict(X, **predict_params)
RANSACRegressor.predict
scikit-learn
161
sklearn/linear_model/_ransac.py
def score(self, X, y, **params): """Return the score of the prediction. This is a wrapper for `estimator_.score(X, y)`. Parameters ---------- X : (array-like or sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. **params : dict Parameters routed to the `score` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- z : float Score of the prediction. """
/usr/src/app/target_test_cases/failed_tests_RANSACRegressor.score.txt
def score(self, X, y, **params): """Return the score of the prediction. This is a wrapper for `estimator_.score(X, y)`. Parameters ---------- X : (array-like or sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. **params : dict Parameters routed to the `score` method of the sub-estimator via the metadata routing API. .. versionadded:: 1.5 Only available if `sklearn.set_config(enable_metadata_routing=True)` is set. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- z : float Score of the prediction. """ check_is_fitted(self) X = validate_data( self, X, ensure_all_finite=False, accept_sparse=True, reset=False, ) _raise_for_params(params, self, "score") if _routing_enabled(): score_params = process_routing(self, "score", **params).estimator["score"] else: score_params = {} return self.estimator_.score(X, y, **score_params)
RANSACRegressor.score
scikit-learn
162
sklearn/kernel_approximation.py
def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_RBFSampler.fit.txt
def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, accept_sparse="csr") random_state = check_random_state(self.random_state) n_features = X.shape[1] sparse = sp.issparse(X) if self.gamma == "scale": # var = E[X^2] - E[X]^2 if sparse X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var() self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0 else: self._gamma = self.gamma self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal( size=(n_features, self.n_components) ) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) if X.dtype == np.float32: # Setting the data type of the fitted attribute will ensure the # output data type during `transform`. self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) self._n_features_out = self.n_components return self
RBFSampler.fit
scikit-learn
163
sklearn/kernel_approximation.py
def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_RBFSampler.transform.txt
def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse="csr", reset=False) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= (2.0 / self.n_components) ** 0.5 return projection
RBFSampler.transform
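A hedged sketch tying together the `RBFSampler.fit` and `RBFSampler.transform` records above: random Fourier features approximating an RBF kernel, fed to a linear classifier. The data and the choice of `SGDClassifier` are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier

X, y = make_classification(n_samples=200, n_features=6, random_state=0)
rbf = RBFSampler(gamma="scale", n_components=300, random_state=0)
X_features = rbf.fit(X).transform(X)   # fit draws the random projection, transform applies it
clf = SGDClassifier(random_state=0).fit(X_features, y)
print(clf.score(X_features, y))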
scikit-learn
164
sklearn/feature_selection/_rfe.py
def predict(self, X, **predict_params): """Reduce X to the selected features and predict using the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. **predict_params : dict Parameters to route to the ``predict`` method of the underlying estimator. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : array of shape [n_samples] The predicted target values. """
/usr/src/app/target_test_cases/failed_tests_RFE.predict.txt
def predict(self, X, **predict_params): """Reduce X to the selected features and predict using the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. **predict_params : dict Parameters to route to the ``predict`` method of the underlying estimator. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : array of shape [n_samples] The predicted target values. """ _raise_for_params(predict_params, self, "predict") check_is_fitted(self) if _routing_enabled(): routed_params = process_routing(self, "predict", **predict_params) else: routed_params = Bunch(estimator=Bunch(predict={})) return self.estimator_.predict( self.transform(X), **routed_params.estimator.predict )
RFE.predict
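A hedged sketch of `RFE.predict`; the synthetic data, the base estimator, and `n_features_to_select=2` are illustrative assumptions.

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=100, n_features=8, random_state=0)
rfe = RFE(LogisticRegression(max_iter=1000), n_features_to_select=2).fit(X, y)
print(rfe.support_)        # boolean mask of the retained features
print(rfe.predict(X[:5]))  # X is reduced to the selected columns before predicting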
scikit-learn
165
sklearn/feature_selection/_rfe.py
def score(self, X, y, **score_params): """Reduce X to the selected features and return the score of the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. **score_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the ``score`` method of the underlying estimator. - If `enable_metadata_routing=True`: Parameters safely routed to the `score` method of the underlying estimator. .. versionadded:: 1.0 .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Score of the underlying base estimator computed with the selected features returned by `rfe.transform(X)` and `y`. """
/usr/src/app/target_test_cases/failed_tests_RFE.score.txt
def score(self, X, y, **score_params): """Reduce X to the selected features and return the score of the estimator. Parameters ---------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The target values. **score_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the ``score`` method of the underlying estimator. - If `enable_metadata_routing=True`: Parameters safely routed to the `score` method of the underlying estimator. .. versionadded:: 1.0 .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- score : float Score of the underlying base estimator computed with the selected features returned by `rfe.transform(X)` and `y`. """ check_is_fitted(self) if _routing_enabled(): routed_params = process_routing(self, "score", **score_params) else: routed_params = Bunch(estimator=Bunch(score=score_params)) return self.estimator_.score( self.transform(X), y, **routed_params.estimator.score )
RFE.score
scikit-learn
166
sklearn/feature_selection/_rfe.py
def fit(self, X, y, *, groups=None, **params): """Fit the RFE model and automatically tune the number of selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the total number of features. y : array-like of shape (n_samples,) Target values (integers for classification, real numbers for regression). groups : array-like of shape (n_samples,) or None, default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). .. versionadded:: 0.20 **params : dict of str -> object Parameters passed to the ``fit`` method of the estimator, the scorer, and the CV splitter. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_RFECV.fit.txt
def fit(self, X, y, *, groups=None, **params): """Fit the RFE model and automatically tune the number of selected features. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the total number of features. y : array-like of shape (n_samples,) Target values (integers for classification, real numbers for regression). groups : array-like of shape (n_samples,) or None, default=None Group labels for the samples used while splitting the dataset into train/test set. Only used in conjunction with a "Group" :term:`cv` instance (e.g., :class:`~sklearn.model_selection.GroupKFold`). .. versionadded:: 0.20 **params : dict of str -> object Parameters passed to the ``fit`` method of the estimator, the scorer, and the CV splitter. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ _raise_for_params(params, self, "fit") X, y = validate_data( self, X, y, accept_sparse="csr", ensure_min_features=2, ensure_all_finite=False, multi_output=True, ) if _routing_enabled(): if groups is not None: params.update({"groups": groups}) routed_params = process_routing(self, "fit", **params) else: routed_params = Bunch( estimator=Bunch(fit={}), splitter=Bunch(split={"groups": groups}), scorer=Bunch(score={}), ) # Initialization cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) scorer = self._get_scorer() # Build an RFE object, which will evaluate and score each possible # feature count, down to self.min_features_to_select n_features = X.shape[1] if self.min_features_to_select > n_features: warnings.warn( ( f"Found min_features_to_select={self.min_features_to_select} > " f"{n_features=}. There will be no feature selection and all " "features will be kept." ), UserWarning, ) rfe = RFE( estimator=self.estimator, n_features_to_select=min(self.min_features_to_select, n_features), importance_getter=self.importance_getter, step=self.step, verbose=self.verbose, ) # Determine the number of subsets of features by fitting across # the train folds and choosing the "features_to_select" parameter # that gives the least averaged error across all folds. # Note that joblib raises a non-picklable error for bound methods # even if n_jobs is set to 1 with the default multiprocessing # backend. # This branching is done so that to # make sure that user code that sets n_jobs to 1 # and provides bound methods as scorers is not broken with the # addition of n_jobs parameter in version 0.18. if effective_n_jobs(self.n_jobs) == 1: parallel, func = list, _rfe_single_fit else: parallel = Parallel(n_jobs=self.n_jobs) func = delayed(_rfe_single_fit) scores_features = parallel( func(rfe, self.estimator, X, y, train, test, scorer, routed_params) for train, test in cv.split(X, y, **routed_params.splitter.split) ) scores, step_n_features = zip(*scores_features) step_n_features_rev = np.array(step_n_features[0])[::-1] scores = np.array(scores) # Reverse order such that lowest number of features is selected in case of tie. scores_sum_rev = np.sum(scores, axis=0)[::-1] n_features_to_select = step_n_features_rev[np.argmax(scores_sum_rev)] # Re-execute an elimination with best_k over the whole set rfe = RFE( estimator=self.estimator, n_features_to_select=n_features_to_select, step=self.step, importance_getter=self.importance_getter, verbose=self.verbose, ) rfe.fit(X, y, **routed_params.estimator.fit) # Set final attributes self.support_ = rfe.support_ self.n_features_ = rfe.n_features_ self.ranking_ = rfe.ranking_ self.estimator_ = clone(self.estimator) self.estimator_.fit(self._transform(X), y, **routed_params.estimator.fit) # reverse to stay consistent with before scores_rev = scores[:, ::-1] self.cv_results_ = { "mean_test_score": np.mean(scores_rev, axis=0), "std_test_score": np.std(scores_rev, axis=0), **{f"split{i}_test_score": scores_rev[i] for i in range(scores.shape[0])}, "n_features": step_n_features_rev, } return self
RFECV.fit
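For orientation, a minimal usage sketch of the RFECV.fit API documented in the record above; it is not part of the dataset, and the toy dataset, estimator, and parameter values below are illustrative assumptions only.

# Hedged sketch: recursive feature elimination with cross-validation on synthetic data.
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=200, n_features=20, n_informative=5, random_state=0)
selector = RFECV(
    estimator=LogisticRegression(max_iter=1000),
    step=1,
    cv=StratifiedKFold(5),
    min_features_to_select=3,
)
selector.fit(X, y)
print(selector.n_features_, selector.support_)
print(selector.cv_results_["mean_test_score"])  # one mean score per candidate feature count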
scikit-learn
167
sklearn/ensemble/_forest.py
def fit_transform(self, X, y=None, sample_weight=None): """ Fit estimator and transform dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data used to build forests. Use ``dtype=np.float32`` for maximum efficiency. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- X_transformed : sparse matrix of shape (n_samples, n_out) Transformed dataset. """
/usr/src/app/target_test_cases/failed_tests_RandomTreesEmbedding.fit_transform.txt
def fit_transform(self, X, y=None, sample_weight=None): """ Fit estimator and transform dataset. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data used to build forests. Use ``dtype=np.float32`` for maximum efficiency. y : Ignored Not used, present for API consistency by convention. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. In the case of classification, splits are also ignored if they would result in any single class carrying a negative weight in either child node. Returns ------- X_transformed : sparse matrix of shape (n_samples, n_out) Transformed dataset. """ rnd = check_random_state(self.random_state) y = rnd.uniform(size=_num_samples(X)) super().fit(X, y, sample_weight=sample_weight) self.one_hot_encoder_ = OneHotEncoder(sparse_output=self.sparse_output) output = self.one_hot_encoder_.fit_transform(self.apply(X)) self._n_features_out = output.shape[1] return output
RandomTreesEmbedding.fit_transform
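As a companion to the RandomTreesEmbedding.fit_transform record above, a minimal sketch of the documented call; the data and forest settings are illustrative assumptions, not taken from the dataset.

# Hedged sketch: embed dense data into a sparse one-hot leaf space.
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomTreesEmbedding

X, _ = make_blobs(n_samples=100, n_features=4, random_state=0)
embedder = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
X_sparse = embedder.fit_transform(X)  # sparse matrix of shape (n_samples, n_out)
print(X_sparse.shape, X_sparse.nnz)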
scikit-learn
168
sklearn/feature_selection/_from_model.py
def fit(self, X, y=None, **fit_params): """Fit the SelectFromModel meta-transformer. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,), default=None The target values (integers that correspond to classes in classification, real numbers in regression). **fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `fit` method of the sub-estimator. They are ignored if `prefit=True`. - If `enable_metadata_routing=True`: Parameters safely routed to the `fit` method of the sub-estimator. They are ignored if `prefit=True`. .. versionchanged:: 1.4 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_SelectFromModel.fit.txt
def fit(self, X, y=None, **fit_params): """Fit the SelectFromModel meta-transformer. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,), default=None The target values (integers that correspond to classes in classification, real numbers in regression). **fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `fit` method of the sub-estimator. They are ignored if `prefit=True`. - If `enable_metadata_routing=True`: Parameters safely routed to the `fit` method of the sub-estimator. They are ignored if `prefit=True`. .. versionchanged:: 1.4 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ self._check_max_features(X) if self.prefit: try: check_is_fitted(self.estimator) except NotFittedError as exc: raise NotFittedError( "When `prefit=True`, `estimator` is expected to be a fitted " "estimator." ) from exc self.estimator_ = deepcopy(self.estimator) else: if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **routed_params.estimator.fit) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **fit_params) if hasattr(self.estimator_, "feature_names_in_"): self.feature_names_in_ = self.estimator_.feature_names_in_ else: _check_feature_names(self, X, reset=True) return self
SelectFromModel.fit
scikit-learn
169
sklearn/feature_selection/_from_model.py
def partial_fit(self, X, y=None, **partial_fit_params): """Fit the SelectFromModel meta-transformer only once. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,), default=None The target values (integers that correspond to classes in classification, real numbers in regression). **partial_fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `partial_fit` method of the sub-estimator. - If `enable_metadata_routing=True`: Parameters passed to the `partial_fit` method of the sub-estimator. They are ignored if `prefit=True`. .. versionchanged:: 1.4 `**partial_fit_params` are routed to the sub-estimator, if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`, which allows for aliasing. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_SelectFromModel.partial_fit.txt
def partial_fit(self, X, y=None, **partial_fit_params): """Fit the SelectFromModel meta-transformer only once. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,), default=None The target values (integers that correspond to classes in classification, real numbers in regression). **partial_fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `partial_fit` method of the sub-estimator. - If `enable_metadata_routing=True`: Parameters passed to the `partial_fit` method of the sub-estimator. They are ignored if `prefit=True`. .. versionchanged:: 1.4 `**partial_fit_params` are routed to the sub-estimator, if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`, which allows for aliasing. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ first_call = not hasattr(self, "estimator_") if first_call: self._check_max_features(X) if self.prefit: if first_call: try: check_is_fitted(self.estimator) except NotFittedError as exc: raise NotFittedError( "When `prefit=True`, `estimator` is expected to be a fitted " "estimator." ) from exc self.estimator_ = deepcopy(self.estimator) return self if first_call: self.estimator_ = clone(self.estimator) if _routing_enabled(): routed_params = process_routing(self, "partial_fit", **partial_fit_params) self.estimator_ = clone(self.estimator) self.estimator_.partial_fit(X, y, **routed_params.estimator.partial_fit) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. self.estimator_.partial_fit(X, y, **partial_fit_params) if hasattr(self.estimator_, "feature_names_in_"): self.feature_names_in_ = self.estimator_.feature_names_in_ else: _check_feature_names(self, X, reset=first_call) return self
SelectFromModel.partial_fit
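The two SelectFromModel records above document fit and partial_fit. Below is a minimal usage sketch of both, assuming default metadata routing (disabled); the synthetic data, threshold choices, and batch split are illustrative assumptions.

# Hedged sketch: one-shot fit vs. mini-batch partial_fit of SelectFromModel.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression, SGDClassifier

X, y = make_classification(n_samples=300, n_features=15, n_informative=4, random_state=0)

# fit: clones the sub-estimator and fits it once on the full data.
sfm = SelectFromModel(LogisticRegression(max_iter=1000), threshold="median")
X_reduced = sfm.fit_transform(X, y)
print(X_reduced.shape, sfm.get_support())

# partial_fit: keeps updating the same wrapped estimator batch by batch;
# `classes` is forwarded to SGDClassifier.partial_fit when routing is disabled.
sfm_online = SelectFromModel(SGDClassifier(random_state=0), threshold="mean")
for batch in np.array_split(np.arange(X.shape[0]), 3):
    sfm_online.partial_fit(X[batch], y[batch], classes=np.unique(y))
print(sfm_online.transform(X).shape)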
scikit-learn
170
sklearn/semi_supervised/_self_training.py
def fit(self, X, y, **params): """ Fit self-training classifier using `X`, `y` as training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. y : {array-like, sparse matrix} of shape (n_samples,) Array representing the labels. Unlabeled samples should have the label -1. **params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_SelfTrainingClassifier.fit.txt
def fit(self, X, y, **params): """ Fit self-training classifier using `X`, `y` as training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. y : {array-like, sparse matrix} of shape (n_samples,) Array representing the labels. Unlabeled samples should have the label -1. **params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ _raise_for_params(params, self, "fit") self.estimator_ = self._get_estimator() # we need row slicing support for sparse matrices, but costly finiteness check # can be delegated to the base estimator. X, y = validate_data( self, X, y, accept_sparse=["csr", "csc", "lil", "dok"], ensure_all_finite=False, ) if y.dtype.kind in ["U", "S"]: raise ValueError( "y has dtype string. If you wish to predict on " "string targets, use dtype object, and use -1" " as the label for unlabeled samples." ) has_label = y != -1 if np.all(has_label): warnings.warn("y contains no unlabeled samples", UserWarning) if self.criterion == "k_best" and ( self.k_best > X.shape[0] - np.sum(has_label) ): warnings.warn( ( "k_best is larger than the amount of unlabeled " "samples. All unlabeled samples will be labeled in " "the first iteration" ), UserWarning, ) if _routing_enabled(): routed_params = process_routing(self, "fit", **params) else: routed_params = Bunch(estimator=Bunch(fit={})) self.transduction_ = np.copy(y) self.labeled_iter_ = np.full_like(y, -1) self.labeled_iter_[has_label] = 0 self.n_iter_ = 0 while not np.all(has_label) and ( self.max_iter is None or self.n_iter_ < self.max_iter ): self.n_iter_ += 1 self.estimator_.fit( X[safe_mask(X, has_label)], self.transduction_[has_label], **routed_params.estimator.fit, ) # Predict on the unlabeled samples prob = self.estimator_.predict_proba(X[safe_mask(X, ~has_label)]) pred = self.estimator_.classes_[np.argmax(prob, axis=1)] max_proba = np.max(prob, axis=1) # Select new labeled samples if self.criterion == "threshold": selected = max_proba > self.threshold else: n_to_select = min(self.k_best, max_proba.shape[0]) if n_to_select == max_proba.shape[0]: selected = np.ones_like(max_proba, dtype=bool) else: # NB these are indices, not a mask selected = np.argpartition(-max_proba, n_to_select)[:n_to_select] # Map selected indices into original array selected_full = np.nonzero(~has_label)[0][selected] # Add newly labeled confident predictions to the dataset self.transduction_[selected_full] = pred[selected] has_label[selected_full] = True self.labeled_iter_[selected_full] = self.n_iter_ if selected_full.shape[0] == 0: # no changed labels self.termination_condition_ = "no_change" break if self.verbose: print( f"End of iteration {self.n_iter_}," f" added {selected_full.shape[0]} new labels." ) if self.n_iter_ == self.max_iter: self.termination_condition_ = "max_iter" if np.all(has_label): self.termination_condition_ = "all_labeled" self.estimator_.fit( X[safe_mask(X, has_label)], self.transduction_[has_label], **routed_params.estimator.fit, ) self.classes_ = self.estimator_.classes_ return self
SelfTrainingClassifier.fit
scikit-learn
171
sklearn/semi_supervised/_self_training.py
def predict(self, X, **params): """Predict the classes of `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's ``predict`` method. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : ndarray of shape (n_samples,) Array with predicted labels. """
/usr/src/app/target_test_cases/failed_tests_SelfTrainingClassifier.predict.txt
def predict(self, X, **params): """Predict the classes of `X`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's ``predict`` method. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : ndarray of shape (n_samples,) Array with predicted labels. """ check_is_fitted(self) _raise_for_params(params, self, "predict") if _routing_enabled(): # metadata routing is enabled. routed_params = process_routing(self, "predict", **params) else: routed_params = Bunch(estimator=Bunch(predict={})) X = validate_data( self, X, accept_sparse=True, ensure_all_finite=False, reset=False, ) return self.estimator_.predict(X, **routed_params.estimator.predict)
SelfTrainingClassifier.predict
scikit-learn
172
sklearn/semi_supervised/_self_training.py
def predict_proba(self, X, **params): """Predict probability for each possible outcome. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's ``predict_proba`` method. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : ndarray of shape (n_samples, n_features) Array with prediction probabilities. """
/usr/src/app/target_test_cases/failed_tests_SelfTrainingClassifier.predict_proba.txt
def predict_proba(self, X, **params): """Predict probability for each possible outcome. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array representing the data. **params : dict of str -> object Parameters to pass to the underlying estimator's ``predict_proba`` method. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y : ndarray of shape (n_samples, n_features) Array with prediction probabilities. """ check_is_fitted(self) _raise_for_params(params, self, "predict_proba") if _routing_enabled(): # metadata routing is enabled. routed_params = process_routing(self, "predict_proba", **params) else: routed_params = Bunch(estimator=Bunch(predict_proba={})) X = validate_data( self, X, accept_sparse=True, ensure_all_finite=False, reset=False, ) return self.estimator_.predict_proba(X, **routed_params.estimator.predict_proba)
SelfTrainingClassifier.predict_proba
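The three SelfTrainingClassifier records above cover fit, predict, and predict_proba. A minimal sketch tying them together follows; the label-masking rate, base classifier, and threshold are illustrative assumptions.

# Hedged sketch: semi-supervised self-training with unlabeled samples marked -1.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
rng = np.random.RandomState(42)
y_partial = y.copy()
y_partial[rng.rand(y.shape[0]) < 0.7] = -1  # hide 70% of the labels

base = SVC(probability=True, gamma="auto", random_state=42)
self_training = SelfTrainingClassifier(base, threshold=0.75)
self_training.fit(X, y_partial)
print(self_training.termination_condition_)
print(self_training.predict(X[:5]))
print(self_training.predict_proba(X[:5]).shape)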
scikit-learn
173
sklearn/feature_selection/_sequential.py
def fit(self, X, y=None, **params): """Learn the features to select from X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,), default=None Target values. This parameter may be ignored for unsupervised learning. **params : dict, default=None Parameters to be passed to the underlying `estimator`, `cv` and `scorer` objects. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_SequentialFeatureSelector.fit.txt
def fit(self, X, y=None, **params): """Learn the features to select from X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of predictors. y : array-like of shape (n_samples,), default=None Target values. This parameter may be ignored for unsupervised learning. **params : dict, default=None Parameters to be passed to the underlying `estimator`, `cv` and `scorer` objects. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself. """ _raise_for_params(params, self, "fit") tags = self.__sklearn_tags__() X = validate_data( self, X, accept_sparse="csc", ensure_min_features=2, ensure_all_finite=not tags.input_tags.allow_nan, ) n_features = X.shape[1] if self.n_features_to_select == "auto": if self.tol is not None: # With auto feature selection, `n_features_to_select_` will be updated # to `support_.sum()` after features are selected. self.n_features_to_select_ = n_features - 1 else: self.n_features_to_select_ = n_features // 2 elif isinstance(self.n_features_to_select, Integral): if self.n_features_to_select >= n_features: raise ValueError("n_features_to_select must be < n_features.") self.n_features_to_select_ = self.n_features_to_select elif isinstance(self.n_features_to_select, Real): self.n_features_to_select_ = int(n_features * self.n_features_to_select) if self.tol is not None and self.tol < 0 and self.direction == "forward": raise ValueError( "tol must be strictly positive when doing forward selection" ) cv = check_cv(self.cv, y, classifier=is_classifier(self.estimator)) cloned_estimator = clone(self.estimator) # the current mask corresponds to the set of features: # - that we have already *selected* if we do forward selection # - that we have already *excluded* if we do backward selection current_mask = np.zeros(shape=n_features, dtype=bool) n_iterations = ( self.n_features_to_select_ if self.n_features_to_select == "auto" or self.direction == "forward" else n_features - self.n_features_to_select_ ) old_score = -np.inf is_auto_select = self.tol is not None and self.n_features_to_select == "auto" # We only need to verify the routing here and not use the routed params # because internally the actual routing will also take place inside the # `cross_val_score` function. if _routing_enabled(): process_routing(self, "fit", **params) for _ in range(n_iterations): new_feature_idx, new_score = self._get_best_new_feature_score( cloned_estimator, X, y, cv, current_mask, **params ) if is_auto_select and ((new_score - old_score) < self.tol): break old_score = new_score current_mask[new_feature_idx] = True if self.direction == "backward": current_mask = ~current_mask self.support_ = current_mask self.n_features_to_select_ = self.support_.sum() return self
SequentialFeatureSelector.fit
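To illustrate the SequentialFeatureSelector.fit record above, a minimal greedy forward-selection sketch; the dataset, estimator, and target feature count are illustrative assumptions.

# Hedged sketch: forward selection of 3 features with 5-fold cross-validation.
from sklearn.datasets import load_diabetes
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.linear_model import Ridge

X, y = load_diabetes(return_X_y=True)
sfs = SequentialFeatureSelector(Ridge(), n_features_to_select=3, direction="forward", cv=5)
sfs.fit(X, y)
print(sfs.get_support())
print(sfs.transform(X).shape)  # (n_samples, 3)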
scikit-learn
174
sklearn/covariance/_shrunk_covariance.py
def fit(self, X, y=None): """Fit the shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_ShrunkCovariance.fit.txt
def fit(self, X, y=None): """Fit the shrunk covariance model to X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) # Not calling the parent object to fit, to avoid a potential # matrix inversion when setting the precision if self.assume_centered: self.location_ = np.zeros(X.shape[1]) else: self.location_ = X.mean(0) covariance = empirical_covariance(X, assume_centered=self.assume_centered) covariance = shrunk_covariance(covariance, self.shrinkage) self._set_covariance(covariance) return self
ShrunkCovariance.fit
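A minimal sketch of the ShrunkCovariance.fit record above; the simulated Gaussian data and the shrinkage value are illustrative assumptions.

# Hedged sketch: fit a shrunk covariance estimate on simulated 2-D data.
import numpy as np
from sklearn.covariance import ShrunkCovariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.8, 0.3], [0.3, 0.4]], size=500)
cov = ShrunkCovariance(shrinkage=0.1).fit(X)
print(cov.location_)    # per-feature mean (zeros if assume_centered=True)
print(cov.covariance_)  # shrunk empirical covariance matrix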
scikit-learn
175
sklearn/impute/_base.py
def fit(self, X, y=None): """Fit the imputer on `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_SimpleImputer.fit.txt
def fit(self, X, y=None): """Fit the imputer on `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data, where `n_samples` is the number of samples and `n_features` is the number of features. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted estimator. """ X = self._validate_input(X, in_fit=True) # default fill_value is 0 for numerical input and "missing_value" # otherwise if self.fill_value is None: if X.dtype.kind in ("i", "u", "f"): fill_value = 0 else: fill_value = "missing_value" else: fill_value = self.fill_value if sp.issparse(X): self.statistics_ = self._sparse_fit( X, self.strategy, self.missing_values, fill_value ) else: self.statistics_ = self._dense_fit( X, self.strategy, self.missing_values, fill_value ) return self
SimpleImputer.fit
scikit-learn
176
sklearn/impute/_base.py
def inverse_transform(self, X): """Convert the data back to the original representation. Inverts the `transform` operation performed on an array. This operation can only be performed after :class:`SimpleImputer` is instantiated with `add_indicator=True`. Note that `inverse_transform` can only invert the transform in features that have binary indicators for missing values. If a feature has no missing values at `fit` time, the feature won't have a binary indicator, and the imputation done at `transform` time won't be inverted. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape \ (n_samples, n_features + n_features_missing_indicator) The imputed data to be reverted to original data. It has to be an augmented array of imputed data and the missing indicator mask. Returns ------- X_original : ndarray of shape (n_samples, n_features) The original `X` with missing values as it was prior to imputation. """
/usr/src/app/target_test_cases/failed_tests_SimpleImputer.inverse_transform.txt
def inverse_transform(self, X): """Convert the data back to the original representation. Inverts the `transform` operation performed on an array. This operation can only be performed after :class:`SimpleImputer` is instantiated with `add_indicator=True`. Note that `inverse_transform` can only invert the transform in features that have binary indicators for missing values. If a feature has no missing values at `fit` time, the feature won't have a binary indicator, and the imputation done at `transform` time won't be inverted. .. versionadded:: 0.24 Parameters ---------- X : array-like of shape \ (n_samples, n_features + n_features_missing_indicator) The imputed data to be reverted to original data. It has to be an augmented array of imputed data and the missing indicator mask. Returns ------- X_original : ndarray of shape (n_samples, n_features) The original `X` with missing values as it was prior to imputation. """ check_is_fitted(self) if not self.add_indicator: raise ValueError( "'inverse_transform' works only when " "'SimpleImputer' is instantiated with " "'add_indicator=True'. " f"Got 'add_indicator={self.add_indicator}' " "instead." ) n_features_missing = len(self.indicator_.features_) non_empty_feature_count = X.shape[1] - n_features_missing array_imputed = X[:, :non_empty_feature_count].copy() missing_mask = X[:, non_empty_feature_count:].astype(bool) n_features_original = len(self.statistics_) shape_original = (X.shape[0], n_features_original) X_original = np.zeros(shape_original) X_original[:, self.indicator_.features_] = missing_mask full_mask = X_original.astype(bool) imputed_idx, original_idx = 0, 0 while imputed_idx < len(array_imputed.T): if not np.all(X_original[:, original_idx]): X_original[:, original_idx] = array_imputed.T[imputed_idx] imputed_idx += 1 original_idx += 1 else: original_idx += 1 X_original[full_mask] = self.missing_values return X_original
SimpleImputer.inverse_transform
scikit-learn
177
sklearn/impute/_base.py
def transform(self, X): """Impute all missing values in `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data to complete. Returns ------- X_imputed : {ndarray, sparse matrix} of shape \ (n_samples, n_features_out) `X` with imputed values. """
/usr/src/app/target_test_cases/failed_tests_SimpleImputer.transform.txt
def transform(self, X): """Impute all missing values in `X`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) The input data to complete. Returns ------- X_imputed : {ndarray, sparse matrix} of shape \ (n_samples, n_features_out) `X` with imputed values. """ check_is_fitted(self) X = self._validate_input(X, in_fit=False) statistics = self.statistics_ if X.shape[1] != statistics.shape[0]: raise ValueError( "X has %d features per sample, expected %d" % (X.shape[1], self.statistics_.shape[0]) ) # compute mask before eliminating invalid features missing_mask = _get_mask(X, self.missing_values) # Decide whether to keep missing features if self.strategy == "constant" or self.keep_empty_features: valid_statistics = statistics valid_statistics_indexes = None else: # same as np.isnan but also works for object dtypes invalid_mask = _get_mask(statistics, np.nan) valid_mask = np.logical_not(invalid_mask) valid_statistics = statistics[valid_mask] valid_statistics_indexes = np.flatnonzero(valid_mask) if invalid_mask.any(): invalid_features = np.arange(X.shape[1])[invalid_mask] # use feature names warning if features are provided if hasattr(self, "feature_names_in_"): invalid_features = self.feature_names_in_[invalid_features] warnings.warn( "Skipping features without any observed values:" f" {invalid_features}. At least one non-missing value is needed" f" for imputation with strategy='{self.strategy}'." ) X = X[:, valid_statistics_indexes] # Do actual imputation if sp.issparse(X): if self.missing_values == 0: raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." ) else: # if no invalid statistics are found, use the mask computed # before, else recompute mask if valid_statistics_indexes is None: mask = missing_mask.data else: mask = _get_mask(X.data, self.missing_values) indexes = np.repeat( np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr) )[mask] X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False) else: # use mask computed before eliminating invalid mask if valid_statistics_indexes is None: mask_valid_features = missing_mask else: mask_valid_features = missing_mask[:, valid_statistics_indexes] n_missing = np.sum(mask_valid_features, axis=0) values = np.repeat(valid_statistics, n_missing) coordinates = np.where(mask_valid_features.transpose())[::-1] X[coordinates] = values X_indicator = super()._transform_indicator(missing_mask) return super()._concatenate_indicator(X, X_indicator)
SimpleImputer.transform
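The three SimpleImputer records above cover fit, inverse_transform, and transform. A minimal round-trip sketch follows; note that inverse_transform requires add_indicator=True, and the toy array and strategy are illustrative assumptions.

# Hedged sketch: impute with the column mean, then recover the missing-value pattern.
import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, np.nan]])
imputer = SimpleImputer(strategy="mean", add_indicator=True)
X_imputed = imputer.fit_transform(X)   # imputed values + missing-indicator columns
print(imputer.statistics_)             # per-feature means learned in fit
print(X_imputed)
X_restored = imputer.inverse_transform(X_imputed)
print(np.isnan(X_restored))            # NaNs restored where values were missing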
scikit-learn
178
sklearn/kernel_approximation.py
def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_SkewedChi2Sampler.fit.txt
def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) random_state = check_random_state(self.random_state) n_features = X.shape[1] uniform = random_state.uniform(size=(n_features, self.n_components)) # transform by inverse CDF of sech self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform)) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) if X.dtype == np.float32: # Setting the data type of the fitted attribute will ensure the # output data type during `transform`. self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) self._n_features_out = self.n_components return self
SkewedChi2Sampler.fit
scikit-learn
179
sklearn/kernel_approximation.py
def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. All values of X must be strictly greater than "-skewedness". Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_SkewedChi2Sampler.transform.txt
def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. All values of X must be strictly greater than "-skewedness". Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """ check_is_fitted(self) X = validate_data( self, X, copy=True, dtype=[np.float64, np.float32], reset=False ) if (X <= -self.skewedness).any(): raise ValueError("X may not contain entries smaller than -skewedness.") X += self.skewedness np.log(X, X) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.0) / np.sqrt(self.n_components) return projection
SkewedChi2Sampler.transform
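The SkewedChi2Sampler fit/transform records above describe an approximate feature map whose inputs must all exceed -skewedness. A minimal sketch follows; the tiny XOR-style data and the downstream SGD classifier are illustrative assumptions.

# Hedged sketch: approximate skewed chi-squared kernel features plus a linear classifier.
import numpy as np
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.linear_model import SGDClassifier

X = np.array([[0.0, 0.0], [1.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
y = np.array([0, 1, 1, 0])
sampler = SkewedChi2Sampler(skewedness=0.01, n_components=10, random_state=0)
X_features = sampler.fit_transform(X, y)
clf = SGDClassifier(max_iter=20, random_state=0).fit(X_features, y)
print(X_features.shape, clf.score(X_features, y))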
scikit-learn
180
sklearn/cluster/_spectral.py
def fit(self, X, y=None): """Perform spectral clustering from features, or affinity matrix. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) Training instances to cluster, similarities / affinities between instances if ``affinity='precomputed'``, or distances between instances if ``affinity='precomputed_nearest_neighbors``. If a sparse matrix is provided in a format other than ``csr_matrix``, ``csc_matrix``, or ``coo_matrix``, it will be converted into a sparse ``csr_matrix``. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object A fitted instance of the estimator. """
/usr/src/app/target_test_cases/failed_tests_SpectralClustering.fit.txt
def fit(self, X, y=None): """Perform spectral clustering from features, or affinity matrix. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) Training instances to cluster, similarities / affinities between instances if ``affinity='precomputed'``, or distances between instances if ``affinity='precomputed_nearest_neighbors``. If a sparse matrix is provided in a format other than ``csr_matrix``, ``csc_matrix``, or ``coo_matrix``, it will be converted into a sparse ``csr_matrix``. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object A fitted instance of the estimator. """ X = validate_data( self, X, accept_sparse=["csr", "csc", "coo"], dtype=np.float64, ensure_min_samples=2, ) allow_squared = self.affinity in [ "precomputed", "precomputed_nearest_neighbors", ] if X.shape[0] == X.shape[1] and not allow_squared: warnings.warn( "The spectral clustering API has changed. ``fit``" "now constructs an affinity matrix from data. To use" " a custom affinity matrix, " "set ``affinity=precomputed``." ) if self.affinity == "nearest_neighbors": connectivity = kneighbors_graph( X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs ) self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) elif self.affinity == "precomputed_nearest_neighbors": estimator = NearestNeighbors( n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed" ).fit(X) connectivity = estimator.kneighbors_graph(X=X, mode="connectivity") self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) elif self.affinity == "precomputed": self.affinity_matrix_ = X else: params = self.kernel_params if params is None: params = {} if not callable(self.affinity): params["gamma"] = self.gamma params["degree"] = self.degree params["coef0"] = self.coef0 self.affinity_matrix_ = pairwise_kernels( X, metric=self.affinity, filter_params=True, **params ) random_state = check_random_state(self.random_state) n_components = ( self.n_clusters if self.n_components is None else self.n_components ) # We now obtain the real valued solution matrix to the # relaxed Ncut problem, solving the eigenvalue problem # L_sym x = lambda x and recovering u = D^-1/2 x. # The first eigenvector is constant only for fully connected graphs # and should be kept for spectral clustering (drop_first = False) # See spectral_embedding documentation. maps = _spectral_embedding( self.affinity_matrix_, n_components=n_components, eigen_solver=self.eigen_solver, random_state=random_state, eigen_tol=self.eigen_tol, drop_first=False, ) if self.verbose: print(f"Computing label assignment using {self.assign_labels}") if self.assign_labels == "kmeans": _, self.labels_, _ = k_means( maps, self.n_clusters, random_state=random_state, n_init=self.n_init, verbose=self.verbose, ) elif self.assign_labels == "cluster_qr": self.labels_ = cluster_qr(maps) else: self.labels_ = discretize(maps, random_state=random_state) return self
SpectralClustering.fit
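A minimal sketch of the SpectralClustering.fit record above, using a k-nearest-neighbors affinity rather than the default RBF kernel; the two-moons data and neighbor count are illustrative assumptions.

# Hedged sketch: spectral clustering of two interleaved half-moons.
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
model = SpectralClustering(
    n_clusters=2,
    affinity="nearest_neighbors",
    n_neighbors=10,
    assign_labels="kmeans",
    random_state=0,
)
model.fit(X)
print(model.labels_[:10])
print(model.affinity_matrix_.shape)  # (n_samples, n_samples) symmetrized k-NN graph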
scikit-learn
181
sklearn/preprocessing/_polynomial.py
def fit(self, X, y=None, sample_weight=None): """Compute knot positions of splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : None Ignored. sample_weight : array-like of shape (n_samples,), default = None Individual weights for each sample. Used to calculate quantiles if `knots="quantile"`. For `knots="uniform"`, zero weighted observations are ignored for finding the min and max of `X`. Returns ------- self : object Fitted transformer. """
/usr/src/app/target_test_cases/failed_tests_SplineTransformer.fit.txt
def fit(self, X, y=None, sample_weight=None): """Compute knot positions of splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data. y : None Ignored. sample_weight : array-like of shape (n_samples,), default = None Individual weights for each sample. Used to calculate quantiles if `knots="quantile"`. For `knots="uniform"`, zero weighted observations are ignored for finding the min and max of `X`. Returns ------- self : object Fitted transformer. """ X = validate_data( self, X, reset=True, accept_sparse=False, ensure_min_samples=2, ensure_2d=True, ) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) _, n_features = X.shape if isinstance(self.knots, str): base_knots = self._get_base_knot_positions( X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight ) else: base_knots = check_array(self.knots, dtype=np.float64) if base_knots.shape[0] < 2: raise ValueError("Number of knots, knots.shape[0], must be >= 2.") elif base_knots.shape[1] != n_features: raise ValueError("knots.shape[1] == n_features is violated.") elif not np.all(np.diff(base_knots, axis=0) > 0): raise ValueError("knots must be sorted without duplicates.") if self.sparse_output and sp_version < parse_version("1.8.0"): raise ValueError( "Option sparse_output=True is only available with scipy>=1.8.0, " f"but here scipy=={sp_version} is used." ) # number of knots for base interval n_knots = base_knots.shape[0] if self.extrapolation == "periodic" and n_knots <= self.degree: raise ValueError( "Periodic splines require degree < n_knots. Got n_knots=" f"{n_knots} and degree={self.degree}." ) # number of splines basis functions if self.extrapolation != "periodic": n_splines = n_knots + self.degree - 1 else: # periodic splines have self.degree less degrees of freedom n_splines = n_knots - 1 degree = self.degree n_out = n_features * n_splines # We have to add degree number of knots below, and degree number knots # above the base knots in order to make the spline basis complete. if self.extrapolation == "periodic": # For periodic splines the spacing of the first / last degree knots # needs to be a continuation of the spacing of the last / first # base knots. period = base_knots[-1] - base_knots[0] knots = np.r_[ base_knots[-(degree + 1) : -1] - period, base_knots, base_knots[1 : (degree + 1)] + period, ] else: # Eilers & Marx in "Flexible smoothing with B-splines and # penalties" https://doi.org/10.1214/ss/1038425655 advice # against repeating first and last knot several times, which # would have inferior behaviour at boundaries if combined with # a penalty (hence P-Spline). We follow this advice even if our # splines are unpenalized. Meaning we do not: # knots = np.r_[ # np.tile(base_knots.min(axis=0), reps=[degree, 1]), # base_knots, # np.tile(base_knots.max(axis=0), reps=[degree, 1]) # ] # Instead, we reuse the distance of the 2 fist/last knots. dist_min = base_knots[1] - base_knots[0] dist_max = base_knots[-1] - base_knots[-2] knots = np.r_[ np.linspace( base_knots[0] - degree * dist_min, base_knots[0] - dist_min, num=degree, ), base_knots, np.linspace( base_knots[-1] + dist_max, base_knots[-1] + degree * dist_max, num=degree, ), ] # With a diagonal coefficient matrix, we get back the spline basis # elements, i.e. the design matrix of the spline. # Note, BSpline appreciates C-contiguous float64 arrays as c=coef. 
coef = np.eye(n_splines, dtype=np.float64) if self.extrapolation == "periodic": coef = np.concatenate((coef, coef[:degree, :])) extrapolate = self.extrapolation in ["periodic", "continue"] bsplines = [ BSpline.construct_fast( knots[:, i], coef, self.degree, extrapolate=extrapolate ) for i in range(n_features) ] self.bsplines_ = bsplines self.n_features_out_ = n_out - n_features * (1 - self.include_bias) return self
SplineTransformer.fit
scikit-learn
182
sklearn/preprocessing/_polynomial.py
def transform(self, X): """Transform each feature data to B-splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to transform. Returns ------- XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines) The matrix of features, where n_splines is the number of bases elements of the B-splines, n_knots + degree - 1. """
/usr/src/app/target_test_cases/failed_tests_SplineTransformer.transform.txt
def transform(self, X): """Transform each feature data to B-splines. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to transform. Returns ------- XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines) The matrix of features, where n_splines is the number of bases elements of the B-splines, n_knots + degree - 1. """ check_is_fitted(self) X = validate_data(self, X, reset=False, accept_sparse=False, ensure_2d=True) n_samples, n_features = X.shape n_splines = self.bsplines_[0].c.shape[1] degree = self.degree # TODO: Remove this condition, once scipy 1.10 is the minimum version. # Only scipy => 1.10 supports design_matrix(.., extrapolate=..). # The default (implicit in scipy < 1.10) is extrapolate=False. scipy_1_10 = sp_version >= parse_version("1.10.0") # Note: self.bsplines_[0].extrapolate is True for extrapolation in # ["periodic", "continue"] if scipy_1_10: use_sparse = self.sparse_output kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate} else: use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate kwargs_extrapolate = dict() # Note that scipy BSpline returns float64 arrays and converts input # x=X[:, i] to c-contiguous float64. n_out = self.n_features_out_ + n_features * (1 - self.include_bias) if X.dtype in FLOAT_DTYPES: dtype = X.dtype else: dtype = np.float64 if use_sparse: output_list = [] else: XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order) for i in range(n_features): spl = self.bsplines_[i] if self.extrapolation in ("continue", "error", "periodic"): if self.extrapolation == "periodic": # With periodic extrapolation we map x to the segment # [spl.t[k], spl.t[n]]. # This is equivalent to BSpline(.., extrapolate="periodic") # for scipy>=1.0.0. n = spl.t.size - spl.k - 1 # Assign to new array to avoid inplace operation x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % ( spl.t[n] - spl.t[spl.k] ) else: x = X[:, i] if use_sparse: XBS_sparse = BSpline.design_matrix( x, spl.t, spl.k, **kwargs_extrapolate ) if self.extrapolation == "periodic": # See the construction of coef in fit. We need to add the last # degree spline basis function to the first degree ones and # then drop the last ones. # Note: See comment about SparseEfficiencyWarning below. XBS_sparse = XBS_sparse.tolil() XBS_sparse[:, :degree] += XBS_sparse[:, -degree:] XBS_sparse = XBS_sparse[:, :-degree] else: XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x) else: # extrapolation in ("constant", "linear") xmin, xmax = spl.t[degree], spl.t[-degree - 1] # spline values at boundaries f_min, f_max = spl(xmin), spl(xmax) mask = (xmin <= X[:, i]) & (X[:, i] <= xmax) if use_sparse: mask_inv = ~mask x = X[:, i].copy() # Set some arbitrary values outside boundary that will be reassigned # later. x[mask_inv] = spl.t[self.degree] XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k) # Note: Without converting to lil_matrix we would get: # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity # structure of a csr_matrix is expensive. lil_matrix is more # efficient. if np.any(mask_inv): XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask_inv, :] = 0 else: XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i]) # Note for extrapolation: # 'continue' is already returned as is by scipy BSplines if self.extrapolation == "error": # BSpline with extrapolate=False does not raise an error, but # outputs np.nan. 
if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or ( not use_sparse and np.any( np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)]) ) ): raise ValueError( "X contains values beyond the limits of the knots." ) elif self.extrapolation == "constant": # Set all values beyond xmin and xmax to the value of the # spline basis functions at those two positions. # Only the first degree and last degree number of splines # have non-zero values at the boundaries. mask = X[:, i] < xmin if np.any(mask): if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, :degree] = f_min[:degree] else: XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[ :degree ] mask = X[:, i] > xmax if np.any(mask): if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, -degree:] = f_max[-degree:] else: XBS[ mask, ((i + 1) * n_splines - degree) : ((i + 1) * n_splines), ] = f_max[-degree:] elif self.extrapolation == "linear": # Continue the degree first and degree last spline bases # linearly beyond the boundaries, with slope = derivative at # the boundary. # Note that all others have derivative = value = 0 at the # boundaries. # spline derivatives = slopes at boundaries fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1) # Compute the linear continuation. if degree <= 1: # For degree=1, the derivative of 2nd spline is not zero at # boundary. For degree=0 it is the same as 'constant'. degree += 1 for j in range(degree): mask = X[:, i] < xmin if np.any(mask): linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j] if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, j] = linear_extr else: XBS[mask, i * n_splines + j] = linear_extr mask = X[:, i] > xmax if np.any(mask): k = n_splines - 1 - j linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k] if use_sparse: # Note: See comment about SparseEfficiencyWarning above. XBS_sparse = XBS_sparse.tolil() XBS_sparse[mask, k : k + 1] = linear_extr[:, None] else: XBS[mask, i * n_splines + k] = linear_extr if use_sparse: XBS_sparse = XBS_sparse.tocsr() output_list.append(XBS_sparse) if use_sparse: # TODO: Remove this conditional error when the minimum supported version of # SciPy is 1.9.2 # `scipy.sparse.hstack` breaks in scipy<1.9.2 # when `n_features_out_ > max_int32` max_int32 = np.iinfo(np.int32).max all_int32 = True for mat in output_list: all_int32 &= mat.indices.dtype == np.int32 if ( sp_version < parse_version("1.9.2") and self.n_features_out_ > max_int32 and all_int32 ): raise ValueError( "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`" " produces negative columns when:\n1. The output shape contains" " `n_cols` too large to be represented by a 32bit signed" " integer.\n. All sub-matrices to be stacked have indices of" " dtype `np.int32`.\nTo avoid this error, either use a version" " of scipy `>=1.9.2` or alter the `SplineTransformer`" " transformer to produce fewer than 2^31 output features" ) XBS = sparse.hstack(output_list, format="csr") elif self.sparse_output: # TODO: Remove ones scipy 1.10 is the minimum version. See comments above. XBS = sparse.csr_matrix(XBS) if self.include_bias: return XBS else: # We throw away one spline basis per feature. # We chose the last one. indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0] return XBS[:, indices]
SplineTransformer.transform
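The SplineTransformer fit/transform records above expand each feature into n_knots + degree - 1 B-spline basis columns. A minimal sketch follows; the grid of inputs and the knot/degree settings are illustrative assumptions.

# Hedged sketch: cubic B-spline basis expansion of a single feature.
import numpy as np
from sklearn.preprocessing import SplineTransformer

X = np.linspace(0.0, 1.0, 20).reshape(-1, 1)
spline = SplineTransformer(n_knots=5, degree=3, extrapolation="constant")
X_spline = spline.fit_transform(X)
print(X_spline.shape)         # (20, 5 + 3 - 1) = (20, 7)
print(spline.bsplines_[0].t)  # knot vector, padded by `degree` knots on each side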
scikit-learn
183
sklearn/ensemble/_stacking.py
def fit(self, X, y, *, sample_weight=None, **fit_params): """Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. Note that `y` will be internally encoded in numerically increasing order or lexicographic order. If the order matter (e.g. for ordinal regression), one should numerically encode the target `y` before calling :term:`fit`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns a fitted instance of estimator. """
/usr/src/app/target_test_cases/failed_tests_StackingClassifier.fit.txt
def fit(self, X, y, *, sample_weight=None, **fit_params): """Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. Note that `y` will be internally encoded in numerically increasing order or lexicographic order. If the order matter (e.g. for ordinal regression), one should numerically encode the target `y` before calling :term:`fit`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.6 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns a fitted instance of estimator. """ _raise_for_params(fit_params, self, "fit") check_classification_targets(y) if type_of_target(y) == "multilabel-indicator": self._label_encoder = [LabelEncoder().fit(yk) for yk in y.T] self.classes_ = [le.classes_ for le in self._label_encoder] y_encoded = np.array( [ self._label_encoder[target_idx].transform(target) for target_idx, target in enumerate(y.T) ] ).T else: self._label_encoder = LabelEncoder().fit(y) self.classes_ = self._label_encoder.classes_ y_encoded = self._label_encoder.transform(y) if sample_weight is not None: fit_params["sample_weight"] = sample_weight return super().fit(X, y_encoded, **fit_params)
StackingClassifier.fit
scikit-learn
184
sklearn/ensemble/_stacking.py
def predict(self, X, **predict_params): """Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. **predict_params : dict of str -> obj Parameters to the `predict` called by the `final_estimator`. Note that this may be used to return uncertainties from some estimators with `return_std` or `return_cov`. Be aware that it will only account for uncertainty in the final estimator. - If `enable_metadata_routing=False` (default): Parameters directly passed to the `predict` method of the `final_estimator`. - If `enable_metadata_routing=True`: Parameters safely routed to the `predict` method of the `final_estimator`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. .. versionchanged:: 1.6 `**predict_params` can be routed via metadata routing API. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets. """
/usr/src/app/target_test_cases/failed_tests_StackingClassifier.predict.txt
def predict(self, X, **predict_params): """Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. **predict_params : dict of str -> obj Parameters to the `predict` called by the `final_estimator`. Note that this may be used to return uncertainties from some estimators with `return_std` or `return_cov`. Be aware that it will only account for uncertainty in the final estimator. - If `enable_metadata_routing=False` (default): Parameters directly passed to the `predict` method of the `final_estimator`. - If `enable_metadata_routing=True`: Parameters safely routed to the `predict` method of the `final_estimator`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. .. versionchanged:: 1.6 `**predict_params` can be routed via metadata routing API. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets. """ if _routing_enabled(): routed_params = process_routing(self, "predict", **predict_params) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. routed_params = Bunch() routed_params.final_estimator_ = Bunch(predict={}) routed_params.final_estimator_.predict = predict_params y_pred = super().predict(X, **routed_params.final_estimator_["predict"]) if isinstance(self._label_encoder, list): # Handle the multilabel-indicator case y_pred = np.array( [ self._label_encoder[target_idx].inverse_transform(target) for target_idx, target in enumerate(y_pred.T) ] ).T else: y_pred = self._label_encoder.inverse_transform(y_pred) return y_pred
StackingClassifier.predict
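The two StackingClassifier records above document fit and predict. A minimal sketch follows; the base estimators, final estimator, and train/test split are illustrative assumptions.

# Hedged sketch: cross-validated stacking of two base classifiers.
from sklearn.datasets import load_iris
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
stack = StackingClassifier(
    estimators=[("svc", LinearSVC(random_state=42)), ("tree", DecisionTreeClassifier(random_state=42))],
    final_estimator=LogisticRegression(max_iter=1000),
    cv=5,
)
stack.fit(X_train, y_train)
print(stack.predict(X_test)[:5], stack.score(X_test, y_test))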
scikit-learn
185
sklearn/ensemble/_stacking.py
def predict(self, X, **predict_params): """Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. **predict_params : dict of str -> obj Parameters to the `predict` called by the `final_estimator`. Note that this may be used to return uncertainties from some estimators with `return_std` or `return_cov`. Be aware that it will only account for uncertainty in the final estimator. - If `enable_metadata_routing=False` (default): Parameters directly passed to the `predict` method of the `final_estimator`. - If `enable_metadata_routing=True`: Parameters safely routed to the `predict` method of the `final_estimator`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. .. versionchanged:: 1.6 `**predict_params` can be routed via metadata routing API. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets. """
/usr/src/app/target_test_cases/failed_tests_StackingRegressor.predict.txt
def predict(self, X, **predict_params): """Predict target for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. **predict_params : dict of str -> obj Parameters to the `predict` called by the `final_estimator`. Note that this may be used to return uncertainties from some estimators with `return_std` or `return_cov`. Be aware that it will only account for uncertainty in the final estimator. - If `enable_metadata_routing=False` (default): Parameters directly passed to the `predict` method of the `final_estimator`. - If `enable_metadata_routing=True`: Parameters safely routed to the `predict` method of the `final_estimator`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. .. versionchanged:: 1.6 `**predict_params` can be routed via metadata routing API. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_output) Predicted targets. """ if _routing_enabled(): routed_params = process_routing(self, "predict", **predict_params) else: # TODO(SLEP6): remove when metadata routing cannot be disabled. routed_params = Bunch() routed_params.final_estimator_ = Bunch(predict={}) routed_params.final_estimator_.predict = predict_params y_pred = super().predict(X, **routed_params.final_estimator_["predict"]) return y_pred
StackingRegressor.predict
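A minimal sketch matching the StackingRegressor.predict record above; the base regressors and final estimator are illustrative assumptions.

# Hedged sketch: stacked regression with a Ridge meta-learner.
from sklearn.datasets import load_diabetes
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsRegressor

X, y = load_diabetes(return_X_y=True)
reg = StackingRegressor(
    estimators=[("ridge", Ridge()), ("knn", KNeighborsRegressor())],
    final_estimator=Ridge(),
)
reg.fit(X, y)
print(reg.predict(X[:5]))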
scikit-learn
186
sklearn/preprocessing/_data.py
def inverse_transform(self, X, copy=None): """Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """
/usr/src/app/target_test_cases/failed_tests_StandardScaler.inverse_transform.txt
def inverse_transform(self, X, copy=None): """Scale back the data to the original representation. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data used to scale along the features axis. copy : bool, default=None Copy the input X or not. Returns ------- X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features) Transformed array. """ check_is_fitted(self) copy = copy if copy is not None else self.copy X = check_array( X, accept_sparse="csr", copy=copy, dtype=FLOAT_DTYPES, force_writeable=True, ensure_all_finite="allow-nan", ) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives." ) if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X
StandardScaler.inverse_transform
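A minimal round-trip sketch for the StandardScaler.inverse_transform record above; the toy array is an illustrative assumption.

# Hedged sketch: scaling followed by inverse scaling recovers the original values.
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
X_back = scaler.inverse_transform(X_scaled)
print(np.allclose(X, X_back))  # True (up to floating-point error)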
scikit-learn
187
sklearn/model_selection/_split.py
def split(self, X, y, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be used as a placeholder for ``X`` instead of actual training data. y : array-like of shape (n_samples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """
/usr/src/app/target_test_cases/failed_tests_StratifiedKFold.split.txt
def split(self, X, y, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be used as a placeholder for ``X`` instead of actual training data. y : array-like of shape (n_samples,) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """ if groups is not None: warnings.warn( f"The groups parameter is ignored by {self.__class__.__name__}", UserWarning, ) y = check_array(y, input_name="y", ensure_2d=False, dtype=None) return super().split(X, y, groups)
StratifiedKFold.split
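Illustrative sketch for the record above (not part of the dataset row): only y drives the stratification, so a zero placeholder suffices for X, as the docstring notes.

# Hedged sketch: iterate over stratified folds.
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.zeros((8, 2))                        # placeholder features
y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=0)
for train_idx, test_idx in skf.split(X, y):
    # each fold preserves the 50/50 class balance of y
    print(train_idx, test_idx)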
scikit-learn
188
sklearn/model_selection/_split.py
def split(self, X, y, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be used as a placeholder for ``X`` instead of actual training data. y : array-like of shape (n_samples,) or (n_samples, n_labels) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """
/usr/src/app/target_test_cases/failed_tests_StratifiedShuffleSplit.split.txt
def split(self, X, y, groups=None): """Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Note that providing ``y`` is sufficient to generate the splits and hence ``np.zeros(n_samples)`` may be used as a placeholder for ``X`` instead of actual training data. y : array-like of shape (n_samples,) or (n_samples, n_labels) The target variable for supervised learning problems. Stratification is done based on the y labels. groups : object Always ignored, exists for compatibility. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. You can make the results identical by setting `random_state` to an integer. """ if groups is not None: warnings.warn( f"The groups parameter is ignored by {self.__class__.__name__}", UserWarning, ) y = check_array(y, input_name="y", ensure_2d=False, dtype=None) return super().split(X, y, groups)
StratifiedShuffleSplit.split
scikit-learn
189
sklearn/manifold/_t_sne.py
def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. If the method is 'barnes_hut' and the metric is 'precomputed', X may be a precomputed sparse graph. y : None Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """
/usr/src/app/target_test_cases/failed_tests_TSNE.fit_transform.txt
def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. If the method is 'barnes_hut' and the metric is 'precomputed', X may be a precomputed sparse graph. y : None Ignored. Returns ------- X_new : ndarray of shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ # TODO(1.7): remove # Also make sure to change `max_iter` default back to 1000 and deprecate None if self.n_iter != "deprecated": if self.max_iter is not None: raise ValueError( "Both 'n_iter' and 'max_iter' attributes were set. Attribute" " 'n_iter' was deprecated in version 1.5 and will be removed in" " 1.7. To avoid this error, only set the 'max_iter' attribute." ) warnings.warn( ( "'n_iter' was renamed to 'max_iter' in version 1.5 and " "will be removed in 1.7." ), FutureWarning, ) self._max_iter = self.n_iter elif self.max_iter is None: self._max_iter = 1000 else: self._max_iter = self.max_iter self._check_params_vs_input(X) embedding = self._fit(X) self.embedding_ = embedding return self.embedding_
TSNE.fit_transform
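Illustrative sketch for the record above (not part of the dataset row), assuming the iris toy data; n_iter and max_iter are left at their defaults so the deprecation branch is not exercised.

# Hedged sketch: embed a small dataset in two dimensions.
from sklearn.datasets import load_iris
from sklearn.manifold import TSNE

X, _ = load_iris(return_X_y=True)
emb = TSNE(n_components=2, init="pca", random_state=0).fit_transform(X)
print(emb.shape)                            # (150, 2)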
scikit-learn
190
sklearn/compose/_target.py
def fit(self, X, y, **fit_params): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `fit` method of the underlying regressor. - If `enable_metadata_routing=True`: Parameters safely routed to the `fit` method of the underlying regressor. .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """
/usr/src/app/target_test_cases/failed_tests_TransformedTargetRegressor.fit.txt
def fit(self, X, y, **fit_params): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. **fit_params : dict - If `enable_metadata_routing=False` (default): Parameters directly passed to the `fit` method of the underlying regressor. - If `enable_metadata_routing=True`: Parameters safely routed to the `fit` method of the underlying regressor. .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Fitted estimator. """ if y is None: raise ValueError( f"This {self.__class__.__name__} estimator " "requires y to be passed, but the target y is None." ) y = check_array( y, input_name="y", accept_sparse=False, ensure_all_finite=True, ensure_2d=False, dtype="numeric", allow_nd=True, ) # store the number of dimension of the target to predict an array of # similar shape at predict self._training_dim = y.ndim # transformers are designed to modify X which is 2d dimensional, we # need to modify y accordingly. if y.ndim == 1: y_2d = y.reshape(-1, 1) else: y_2d = y self._fit_transformer(y_2d) # transform y and convert back to 1d array if needed y_trans = self.transformer_.transform(y_2d) # FIXME: a FunctionTransformer can return a 1D array even when validate # is set to True. Therefore, we need to check the number of dimension # first. if y_trans.ndim == 2 and y_trans.shape[1] == 1: y_trans = y_trans.squeeze(axis=1) self.regressor_ = self._get_regressor(get_clone=True) if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) else: routed_params = Bunch(regressor=Bunch(fit=fit_params)) self.regressor_.fit(X, y_trans, **routed_params.regressor.fit) if hasattr(self.regressor_, "feature_names_in_"): self.feature_names_in_ = self.regressor_.feature_names_in_ return self
TransformedTargetRegressor.fit
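Illustrative sketch for the record above (not part of the dataset row): a linear model fit on a log-transformed target, with func/inverse_func as the hypothetical transformation pair.

# Hedged sketch: regress on log(y) and map predictions back through exp.
import numpy as np
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.uniform(0, 10, size=(100, 1))
y = np.exp(0.3 * X.ravel() + rng.normal(scale=0.1, size=100))   # strictly positive target
ttr = TransformedTargetRegressor(
    regressor=LinearRegression(), func=np.log, inverse_func=np.exp
)
ttr.fit(X, y)                               # the regressor sees log(y)
print(ttr.predict(X[:3]))                   # predictions are inverse-transformed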
scikit-learn
191
sklearn/compose/_target.py
def predict(self, X, **predict_params): """Predict using the base regressor, applying inverse. The regressor is used to predict and the `inverse_func` or `inverse_transform` is applied before returning the prediction. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. **predict_params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters directly passed to the `predict` method of the underlying regressor. - If `enable_metadata_routing=True`: Parameters safely routed to the `predict` method of the underlying regressor. .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_hat : ndarray of shape (n_samples,) Predicted values. """
/usr/src/app/target_test_cases/failed_tests_TransformedTargetRegressor.predict.txt
def predict(self, X, **predict_params): """Predict using the base regressor, applying inverse. The regressor is used to predict and the `inverse_func` or `inverse_transform` is applied before returning the prediction. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. **predict_params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters directly passed to the `predict` method of the underlying regressor. - If `enable_metadata_routing=True`: Parameters safely routed to the `predict` method of the underlying regressor. .. versionchanged:: 1.6 See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- y_hat : ndarray of shape (n_samples,) Predicted values. """ check_is_fitted(self) if _routing_enabled(): routed_params = process_routing(self, "predict", **predict_params) else: routed_params = Bunch(regressor=Bunch(predict=predict_params)) pred = self.regressor_.predict(X, **routed_params.regressor.predict) if pred.ndim == 1: pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1)) else: pred_trans = self.transformer_.inverse_transform(pred) if ( self._training_dim == 1 and pred_trans.ndim == 2 and pred_trans.shape[1] == 1 ): pred_trans = pred_trans.squeeze(axis=1) return pred_trans
TransformedTargetRegressor.predict
scikit-learn
192
sklearn/ensemble/_hist_gradient_boosting/grower.py
def make_predictor(self, binning_thresholds): """Make a TreePredictor object out of the current tree. Parameters ---------- binning_thresholds : array-like of floats Corresponds to the bin_thresholds_ attribute of the BinMapper. For each feature, this stores: - the bin frontiers for continuous features - the unique raw category values for categorical features Returns ------- A TreePredictor object. """
/usr/src/app/target_test_cases/failed_tests_TreeGrower.make_predictor.txt
def make_predictor(self, binning_thresholds): """Make a TreePredictor object out of the current tree. Parameters ---------- binning_thresholds : array-like of floats Corresponds to the bin_thresholds_ attribute of the BinMapper. For each feature, this stores: - the bin frontiers for continuous features - the unique raw category values for categorical features Returns ------- A TreePredictor object. """ predictor_nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE) binned_left_cat_bitsets = np.zeros( (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE ) raw_left_cat_bitsets = np.zeros( (self.n_categorical_splits, 8), dtype=X_BITSET_INNER_DTYPE ) _fill_predictor_arrays( predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets, self.root, binning_thresholds, self.n_bins_non_missing, ) return TreePredictor( predictor_nodes, binned_left_cat_bitsets, raw_left_cat_bitsets )
TreeGrower.make_predictor
scikit-learn
193
sklearn/ensemble/_hist_gradient_boosting/predictor.py
def predict(self, X, known_cat_bitsets, f_idx_map, n_threads): """Predict raw values for non-binned data. Parameters ---------- X : ndarray, shape (n_samples, n_features) The input samples. known_cat_bitsets : ndarray of shape (n_categorical_features, 8) Array of bitsets of known categories, for each categorical feature. f_idx_map : ndarray of shape (n_features,) Map from original feature index to the corresponding index in the known_cat_bitsets array. n_threads : int Number of OpenMP threads to use. Returns ------- y : ndarray, shape (n_samples,) The raw predicted values. """
/usr/src/app/target_test_cases/failed_tests_TreePredictor.predict.txt
def predict(self, X, known_cat_bitsets, f_idx_map, n_threads): """Predict raw values for non-binned data. Parameters ---------- X : ndarray, shape (n_samples, n_features) The input samples. known_cat_bitsets : ndarray of shape (n_categorical_features, 8) Array of bitsets of known categories, for each categorical feature. f_idx_map : ndarray of shape (n_features,) Map from original feature index to the corresponding index in the known_cat_bitsets array. n_threads : int Number of OpenMP threads to use. Returns ------- y : ndarray, shape (n_samples,) The raw predicted values. """ out = np.empty(X.shape[0], dtype=Y_DTYPE) _predict_from_raw_data( self.nodes, X, self.raw_left_cat_bitsets, known_cat_bitsets, f_idx_map, n_threads, out, ) return out
TreePredictor.predict
scikit-learn
194
sklearn/ensemble/_hist_gradient_boosting/predictor.py
def predict_binned(self, X, missing_values_bin_idx, n_threads): """Predict raw values for binned data. Parameters ---------- X : ndarray, shape (n_samples, n_features) The input samples. missing_values_bin_idx : uint8 Index of the bin that is used for missing values. This is the index of the last bin and is always equal to max_bins (as passed to the GBDT classes), or equivalently to n_bins - 1. n_threads : int Number of OpenMP threads to use. Returns ------- y : ndarray, shape (n_samples,) The raw predicted values. """
/usr/src/app/target_test_cases/failed_tests_TreePredictor.predict_binned.txt
def predict_binned(self, X, missing_values_bin_idx, n_threads): """Predict raw values for binned data. Parameters ---------- X : ndarray, shape (n_samples, n_features) The input samples. missing_values_bin_idx : uint8 Index of the bin that is used for missing values. This is the index of the last bin and is always equal to max_bins (as passed to the GBDT classes), or equivalently to n_bins - 1. n_threads : int Number of OpenMP threads to use. Returns ------- y : ndarray, shape (n_samples,) The raw predicted values. """ out = np.empty(X.shape[0], dtype=Y_DTYPE) _predict_from_binned_data( self.nodes, X, self.binned_left_cat_bitsets, missing_values_bin_idx, n_threads, out, ) return out
TreePredictor.predict_binned
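TreeGrower and TreePredictor above are private building blocks of the histogram gradient boosting estimators; an illustrative sketch of the public entry point (not part of the dataset rows) is:

# Hedged sketch: the fitted trees of this estimator are TreePredictor objects
# produced by TreeGrower.make_predictor.
from sklearn.datasets import load_diabetes
from sklearn.ensemble import HistGradientBoostingRegressor

X, y = load_diabetes(return_X_y=True)
hgb = HistGradientBoostingRegressor(max_iter=50, random_state=0).fit(X, y)
print(hgb.predict(X[:3]))                   # sums the raw values predicted by each tree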
scikit-learn
195
sklearn/decomposition/_truncated_svd.py
def fit_transform(self, X, y=None): """Fit model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : Ignored Not used, present here for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """
/usr/src/app/target_test_cases/failed_tests_TruncatedSVD.fit_transform.txt
def fit_transform(self, X, y=None): """Fit model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : Ignored Not used, present here for API consistency by convention. Returns ------- X_new : ndarray of shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = validate_data(self, X, accept_sparse=["csr", "csc"], ensure_min_features=2) random_state = check_random_state(self.random_state) if self.algorithm == "arpack": v0 = _init_arpack_v0(min(X.shape), random_state) U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] # u_based_decision=False is needed to be consistent with PCA. U, VT = svd_flip(U[:, ::-1], VT[::-1], u_based_decision=False) elif self.algorithm == "randomized": if self.n_components > X.shape[1]: raise ValueError( f"n_components({self.n_components}) must be <=" f" n_features({X.shape[1]})." ) U, Sigma, VT = randomized_svd( X, self.n_components, n_iter=self.n_iter, n_oversamples=self.n_oversamples, power_iteration_normalizer=self.power_iteration_normalizer, random_state=random_state, flip_sign=False, ) U, VT = svd_flip(U, VT, u_based_decision=False) self.components_ = VT # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T, # X @ V is not the same as U @ Sigma if self.algorithm == "randomized" or ( self.algorithm == "arpack" and self.tol > 0 ): X_transformed = safe_sparse_dot(X, self.components_.T) else: X_transformed = U * Sigma # Calculate explained variance & explained variance ratio self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis(X, axis=0) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var self.singular_values_ = Sigma # Store the singular values. return X_transformed
TruncatedSVD.fit_transform
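Illustrative sketch for the record above (not part of the dataset row), using a random sparse matrix so the sparse code path is exercised.

# Hedged sketch: reduce a sparse matrix to five components.
from scipy.sparse import random as sparse_random
from sklearn.decomposition import TruncatedSVD

X = sparse_random(100, 50, density=0.05, random_state=0)
svd = TruncatedSVD(n_components=5, random_state=0)
X_reduced = svd.fit_transform(X)            # dense array of shape (100, 5)
print(X_reduced.shape, svd.explained_variance_ratio_.sum())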
scikit-learn
196
sklearn/ensemble/_voting.py
def fit(self, X, y, *, sample_weight=None, **fit_params): """Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. .. versionadded:: 0.18 **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.5 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself. """
/usr/src/app/target_test_cases/failed_tests_VotingClassifier.fit.txt
def fit(self, X, y, *, sample_weight=None, **fit_params): """Fit the estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Note that this is supported only if all underlying estimators support sample weights. .. versionadded:: 0.18 **fit_params : dict Parameters to pass to the underlying estimators. .. versionadded:: 1.5 Only available if `enable_metadata_routing=True`, which can be set by using ``sklearn.set_config(enable_metadata_routing=True)``. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Returns the instance itself. """ _raise_for_params(fit_params, self, "fit") y_type = type_of_target(y, input_name="y") if y_type in ("unknown", "continuous"): # raise a specific ValueError for non-classification tasks raise ValueError( f"Unknown label type: {y_type}. Maybe you are trying to fit a " "classifier, which expects discrete classes on a " "regression target with continuous values." ) elif y_type not in ("binary", "multiclass"): # raise a NotImplementedError for backward compatibility for non-supported # classification tasks raise NotImplementedError( f"{self.__class__.__name__} only supports binary or multiclass " "classification. Multilabel and multi-output classification are not " "supported." ) self.le_ = LabelEncoder().fit(y) self.classes_ = self.le_.classes_ transformed_y = self.le_.transform(y) if sample_weight is not None: fit_params["sample_weight"] = sample_weight return super().fit(X, transformed_y, **fit_params)
VotingClassifier.fit
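Illustrative sketch for the record above (not part of the dataset row), assuming the iris toy data and two base classifiers that both expose predict_proba.

# Hedged sketch: fit a soft-voting ensemble; labels are encoded internally.
from sklearn.datasets import load_iris
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = VotingClassifier(
    estimators=[
        ("lr", LogisticRegression(max_iter=1000)),
        ("dt", DecisionTreeClassifier(random_state=0)),
    ],
    voting="soft",
)
clf.fit(X, y)
print(clf.classes_)                         # [0 1 2]
print(clf.predict(X[:5]))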
scikit-learn
197
sklearn/ensemble/_voting.py
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """
/usr/src/app/target_test_cases/failed_tests_VotingClassifier.get_feature_names_out.txt
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") if self.voting == "soft" and not self.flatten_transform: raise ValueError( "get_feature_names_out is not supported when `voting='soft'` and " "`flatten_transform=False`" ) _check_feature_names_in(self, input_features, generate_names=False) class_name = self.__class__.__name__.lower() active_names = [name for name, est in self.estimators if est != "drop"] if self.voting == "hard": return np.asarray( [f"{class_name}_{name}" for name in active_names], dtype=object ) # voting == "soft" n_classes = len(self.classes_) names_out = [ f"{class_name}_{name}{i}" for name in active_names for i in range(n_classes) ] return np.asarray(names_out, dtype=object)
VotingClassifier.get_feature_names_out
scikit-learn
198
sklearn/ensemble/_voting.py
def predict(self, X): """Predict class labels for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- maj : array-like of shape (n_samples,) Predicted class labels. """
/usr/src/app/target_test_cases/failed_tests_VotingClassifier.predict.txt
def predict(self, X): """Predict class labels for X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Returns ------- maj : array-like of shape (n_samples,) Predicted class labels. """ check_is_fitted(self) if self.voting == "soft": maj = np.argmax(self.predict_proba(X), axis=1) else: # 'hard' voting predictions = self._predict(X) maj = np.apply_along_axis( lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)), axis=1, arr=predictions, ) maj = self.le_.inverse_transform(maj) return maj
VotingClassifier.predict
scikit-learn
199
sklearn/ensemble/_voting.py
def transform(self, X): """Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- probabilities_or_labels If `voting='soft'` and `flatten_transform=True`: returns ndarray of shape (n_samples, n_classifiers * n_classes), being class probabilities calculated by each classifier. If `voting='soft' and `flatten_transform=False`: ndarray of shape (n_classifiers, n_samples, n_classes) If `voting='hard'`: ndarray of shape (n_samples, n_classifiers), being class labels predicted by each classifier. """
/usr/src/app/target_test_cases/failed_tests_VotingClassifier.transform.txt
def transform(self, X): """Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- probabilities_or_labels If `voting='soft'` and `flatten_transform=True`: returns ndarray of shape (n_samples, n_classifiers * n_classes), being class probabilities calculated by each classifier. If `voting='soft' and `flatten_transform=False`: ndarray of shape (n_classifiers, n_samples, n_classes) If `voting='hard'`: ndarray of shape (n_samples, n_classifiers), being class labels predicted by each classifier. """ check_is_fitted(self) if self.voting == "soft": probas = self._collect_probas(X) if not self.flatten_transform: return probas return np.hstack(probas) else: return self._predict(X)
VotingClassifier.transform
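Illustrative sketch for the record above (not part of the dataset row), showing the flattened shape returned under soft voting.

# Hedged sketch: transform stacks per-classifier probabilities horizontally.
from sklearn.datasets import load_iris
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB

X, y = load_iris(return_X_y=True)
soft = VotingClassifier(
    estimators=[("lr", LogisticRegression(max_iter=1000)), ("nb", GaussianNB())],
    voting="soft",
    flatten_transform=True,
).fit(X, y)
print(soft.transform(X).shape)              # (150, 6): 2 classifiers x 3 classes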
scikit-learn
200
sklearn/ensemble/_voting.py
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """
/usr/src/app/target_test_cases/failed_tests_VotingRegressor.get_feature_names_out.txt
def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ check_is_fitted(self, "n_features_in_") _check_feature_names_in(self, input_features, generate_names=False) class_name = self.__class__.__name__.lower() return np.asarray( [f"{class_name}_{name}" for name, est in self.estimators if est != "drop"], dtype=object, )
VotingRegressor.get_feature_names_out
scikit-learn
201
sklearn/ensemble/_hist_gradient_boosting/binning.py
def fit(self, X, y=None): """Fit data X by computing the binning thresholds. The last bin is reserved for missing values, whether missing values are present in the data or not. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to bin. y: None Ignored. Returns ------- self : object """
/usr/src/app/target_test_cases/failed_tests__BinMapper.fit.txt
def fit(self, X, y=None): """Fit data X by computing the binning thresholds. The last bin is reserved for missing values, whether missing values are present in the data or not. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to bin. y: None Ignored. Returns ------- self : object """ if not (3 <= self.n_bins <= 256): # min is 3: at least 2 distinct bins and a missing values bin raise ValueError( "n_bins={} should be no smaller than 3 and no larger than 256.".format( self.n_bins ) ) X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False) max_bins = self.n_bins - 1 rng = check_random_state(self.random_state) if self.subsample is not None and X.shape[0] > self.subsample: subset = rng.choice(X.shape[0], self.subsample, replace=False) X = X.take(subset, axis=0) if self.is_categorical is None: self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8) else: self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8) n_features = X.shape[1] known_categories = self.known_categories if known_categories is None: known_categories = [None] * n_features # validate is_categorical and known_categories parameters for f_idx in range(n_features): is_categorical = self.is_categorical_[f_idx] known_cats = known_categories[f_idx] if is_categorical and known_cats is None: raise ValueError( f"Known categories for feature {f_idx} must be provided." ) if not is_categorical and known_cats is not None: raise ValueError( f"Feature {f_idx} isn't marked as a categorical feature, " "but categories were passed." ) self.missing_values_bin_idx_ = self.n_bins - 1 self.bin_thresholds_ = [None] * n_features n_bins_non_missing = [None] * n_features non_cat_thresholds = Parallel(n_jobs=self.n_threads, backend="threading")( delayed(_find_binning_thresholds)(X[:, f_idx], max_bins) for f_idx in range(n_features) if not self.is_categorical_[f_idx] ) non_cat_idx = 0 for f_idx in range(n_features): if self.is_categorical_[f_idx]: # Since categories are assumed to be encoded in # [0, n_cats] and since n_cats <= max_bins, # the thresholds *are* the unique categorical values. This will # lead to the correct mapping in transform() thresholds = known_categories[f_idx] n_bins_non_missing[f_idx] = thresholds.shape[0] self.bin_thresholds_[f_idx] = thresholds else: self.bin_thresholds_[f_idx] = non_cat_thresholds[non_cat_idx] n_bins_non_missing[f_idx] = self.bin_thresholds_[f_idx].shape[0] + 1 non_cat_idx += 1 self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32) return self
_BinMapper.fit
scikit-learn
202
sklearn/ensemble/_hist_gradient_boosting/binning.py
def transform(self, X): """Bin data X. Missing values will be mapped to the last bin. For categorical features, the mapping will be incorrect for unknown categories. Since the BinMapper is given known_categories of the entire training data (i.e. before the call to train_test_split() in case of early-stopping), this never happens. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to bin. Returns ------- X_binned : array-like of shape (n_samples, n_features) The binned data (fortran-aligned). """
/usr/src/app/target_test_cases/failed_tests__BinMapper.transform.txt
def transform(self, X): """Bin data X. Missing values will be mapped to the last bin. For categorical features, the mapping will be incorrect for unknown categories. Since the BinMapper is given known_categories of the entire training data (i.e. before the call to train_test_split() in case of early-stopping), this never happens. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to bin. Returns ------- X_binned : array-like of shape (n_samples, n_features) The binned data (fortran-aligned). """ X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False) check_is_fitted(self) if X.shape[1] != self.n_bins_non_missing_.shape[0]: raise ValueError( "This estimator was fitted with {} features but {} got passed " "to transform()".format(self.n_bins_non_missing_.shape[0], X.shape[1]) ) n_threads = _openmp_effective_n_threads(self.n_threads) binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F") _map_to_bins( X, self.bin_thresholds_, self.is_categorical_, self.missing_values_bin_idx_, n_threads, binned, ) return binned
_BinMapper.transform
scikit-learn
203
sklearn/calibration.py
def predict_proba(self, X): """Calculate calibrated probabilities. Calculates classification calibrated probabilities for each class, in a one-vs-all manner, for `X`. Parameters ---------- X : ndarray of shape (n_samples, n_features) The sample data. Returns ------- proba : array, shape (n_samples, n_classes) The predicted probabilities. Can be exact zeros. """
/usr/src/app/target_test_cases/failed_tests__CalibratedClassifier.predict_proba.txt
def predict_proba(self, X): """Calculate calibrated probabilities. Calculates classification calibrated probabilities for each class, in a one-vs-all manner, for `X`. Parameters ---------- X : ndarray of shape (n_samples, n_features) The sample data. Returns ------- proba : array, shape (n_samples, n_classes) The predicted probabilities. Can be exact zeros. """ predictions, _ = _get_response_values( self.estimator, X, response_method=["decision_function", "predict_proba"], ) if predictions.ndim == 1: # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` predictions = predictions.reshape(-1, 1) n_classes = len(self.classes) label_encoder = LabelEncoder().fit(self.classes) pos_class_indices = label_encoder.transform(self.estimator.classes_) proba = np.zeros((_num_samples(X), n_classes)) for class_idx, this_pred, calibrator in zip( pos_class_indices, predictions.T, self.calibrators ): if n_classes == 2: # When binary, `predictions` consists only of predictions for # clf.classes_[1] but `pos_class_indices` = 0 class_idx += 1 proba[:, class_idx] = calibrator.predict(this_pred) # Normalize the probabilities if n_classes == 2: proba[:, 0] = 1.0 - proba[:, 1] else: denominator = np.sum(proba, axis=1)[:, np.newaxis] # In the edge case where for each class calibrator returns a null # probability for a given sample, use the uniform distribution # instead. uniform_proba = np.full_like(proba, 1 / n_classes) proba = np.divide( proba, denominator, out=uniform_proba, where=denominator != 0 ) # Deal with cases where the predicted probability minimally exceeds 1.0 proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 return proba
_CalibratedClassifier.predict_proba
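_CalibratedClassifier above is an internal helper; calibration is normally driven through the public CalibratedClassifierCV, sketched here for illustration (not part of the dataset row).

# Hedged sketch: sigmoid calibration of an SVM's decision_function scores.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=200, random_state=0)
calibrated = CalibratedClassifierCV(LinearSVC(), method="sigmoid", cv=3)
calibrated.fit(X, y)                        # builds one _CalibratedClassifier per fold
proba = calibrated.predict_proba(X[:5])     # rows sum to 1
print(proba.shape)                          # (5, 2)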
scikit-learn
204
sklearn/linear_model/_glm/glm.py
def fit(self, X, y, sample_weight=None): """Fit a Generalized Linear Model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- self : object Fitted model. """
/usr/src/app/target_test_cases/failed_tests__GeneralizedLinearRegressor.fit.txt
def fit(self, X, y, sample_weight=None): """Fit a Generalized Linear Model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- self : object Fitted model. """ X, y = validate_data( self, X, y, accept_sparse=["csc", "csr"], dtype=[np.float64, np.float32], y_numeric=True, multi_output=False, ) # required by losses if self.solver == "lbfgs": # lbfgs will force coef and therefore raw_prediction to be float64. The # base_loss needs y, X @ coef and sample_weight all of same dtype # (and contiguous). loss_dtype = np.float64 else: loss_dtype = min(max(y.dtype, X.dtype), np.float64) y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False) if sample_weight is not None: # Note that _check_sample_weight calls check_array(order="C") required by # losses. sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype) n_samples, n_features = X.shape self._base_loss = self._get_loss() linear_loss = LinearModelLoss( base_loss=self._base_loss, fit_intercept=self.fit_intercept, ) if not linear_loss.base_loss.in_y_true_range(y): raise ValueError( "Some value(s) of y are out of the valid range of the loss" f" {self._base_loss.__class__.__name__!r}." ) # TODO: if alpha=0 check that X is not rank deficient # NOTE: Rescaling of sample_weight: # We want to minimize # obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance) # + 1/2 * alpha * L2, # with # deviance = 2 * loss. # The objective is invariant to multiplying sample_weight by a constant. We # could choose this constant such that sum(sample_weight) = 1 in order to end # up with # obj = sum(sample_weight * loss) + 1/2 * alpha * L2. # But LinearModelLoss.loss() already computes # average(loss, weights=sample_weight) # Thus, without rescaling, we have # obj = LinearModelLoss.loss(...) if self.warm_start and hasattr(self, "coef_"): if self.fit_intercept: # LinearModelLoss needs intercept at the end of coefficient array. coef = np.concatenate((self.coef_, np.array([self.intercept_]))) else: coef = self.coef_ coef = coef.astype(loss_dtype, copy=False) else: coef = linear_loss.init_zero_coef(X, dtype=loss_dtype) if self.fit_intercept: coef[-1] = linear_loss.base_loss.link.link( np.average(y, weights=sample_weight) ) l2_reg_strength = self.alpha n_threads = _openmp_effective_n_threads() # Algorithms for optimization: # Note again that our losses implement 1/2 * deviance. if self.solver == "lbfgs": func = linear_loss.loss_gradient opt_res = scipy.optimize.minimize( func, coef, method="L-BFGS-B", jac=True, options={ "maxiter": self.max_iter, "maxls": 50, # default is 20 "iprint": self.verbose - 1, "gtol": self.tol, # The constant 64 was found empirically to pass the test suite. # The point is that ftol is very small, but a bit larger than # machine precision for float64, which is the dtype used by lbfgs. 
"ftol": 64 * np.finfo(float).eps, }, args=(X, y, sample_weight, l2_reg_strength, n_threads), ) self.n_iter_ = _check_optimize_result("lbfgs", opt_res) coef = opt_res.x elif self.solver == "newton-cholesky": sol = NewtonCholeskySolver( coef=coef, linear_loss=linear_loss, l2_reg_strength=l2_reg_strength, tol=self.tol, max_iter=self.max_iter, n_threads=n_threads, verbose=self.verbose, ) coef = sol.solve(X, y, sample_weight) self.n_iter_ = sol.iteration elif issubclass(self.solver, NewtonSolver): sol = self.solver( coef=coef, linear_loss=linear_loss, l2_reg_strength=l2_reg_strength, tol=self.tol, max_iter=self.max_iter, n_threads=n_threads, ) coef = sol.solve(X, y, sample_weight) self.n_iter_ = sol.iteration else: raise ValueError(f"Invalid solver={self.solver}.") if self.fit_intercept: self.intercept_ = coef[-1] self.coef_ = coef[:-1] else: # set intercept to zero as the other linear models do self.intercept_ = 0.0 self.coef_ = coef return self
_GeneralizedLinearRegressor.fit
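_GeneralizedLinearRegressor above is the shared base of the public GLMs; an illustrative sketch with PoissonRegressor, which goes through this lbfgs branch by default (not part of the dataset row), is:

# Hedged sketch: fit a Poisson GLM on synthetic count data.
import numpy as np
from sklearn.linear_model import PoissonRegressor

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 2))
y = rng.poisson(lam=np.exp(1.0 + X[:, 0]))  # counts lie in the valid range of the loss
glm = PoissonRegressor(alpha=1e-3, max_iter=300)
glm.fit(X, y)
print(glm.coef_, glm.intercept_, glm.n_iter_)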
scikit-learn
205
sklearn/utils/_estimator_html_repr.py
def _get_doc_link(self): """Generates a link to the API documentation for a given estimator. This method generates the link to the estimator's documentation page by using the template defined by the attribute `_doc_link_template`. Returns ------- url : str The URL to the API documentation for this estimator. If the estimator does not belong to module `_doc_link_module`, the empty string (i.e. `""`) is returned. """
/usr/src/app/target_test_cases/failed_tests__HTMLDocumentationLinkMixin._get_doc_link.txt
def _get_doc_link(self): """Generates a link to the API documentation for a given estimator. This method generates the link to the estimator's documentation page by using the template defined by the attribute `_doc_link_template`. Returns ------- url : str The URL to the API documentation for this estimator. If the estimator does not belong to module `_doc_link_module`, the empty string (i.e. `""`) is returned. """ if self.__class__.__module__.split(".")[0] != self._doc_link_module: return "" if self._doc_link_url_param_generator is None: estimator_name = self.__class__.__name__ # Construct the estimator's module name, up to the first private submodule. # This works because in scikit-learn all public estimators are exposed at # that level, even if they actually live in a private sub-module. estimator_module = ".".join( itertools.takewhile( lambda part: not part.startswith("_"), self.__class__.__module__.split("."), ) ) return self._doc_link_template.format( estimator_module=estimator_module, estimator_name=estimator_name ) return self._doc_link_template.format(**self._doc_link_url_param_generator())
_HTMLDocumentationLinkMixin._get_doc_link
scikit-learn
206
sklearn/linear_model/_ridge.py
def _compute_covariance(self, X, sqrt_sw): """Computes covariance matrix X^TX with possible centering. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- covariance : ndarray of shape (n_features, n_features) The covariance matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of ``X`` for each feature. Notes ----- Since X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. The centered X is never actually computed because centering would break the sparsity of X. """
/usr/src/app/target_test_cases/failed_tests__RidgeGCV._compute_covariance.txt
def _compute_covariance(self, X, sqrt_sw): """Computes covariance matrix X^TX with possible centering. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- covariance : ndarray of shape (n_features, n_features) The covariance matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of ``X`` for each feature. Notes ----- Since X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. The centered X is never actually computed because centering would break the sparsity of X. """ if not self.fit_intercept: # in this case centering has been done in preprocessing # or we are not fitting an intercept. X_mean = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X.T, X, dense_output=True), X_mean # this function only gets called for sparse X n_samples = X.shape[0] sample_weight_matrix = sparse.dia_matrix( (sqrt_sw, 0), shape=(n_samples, n_samples) ) X_weighted = sample_weight_matrix.dot(X) X_mean, _ = mean_variance_axis(X_weighted, axis=0) X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw) weight_sum = sqrt_sw.dot(sqrt_sw) return ( safe_sparse_dot(X.T, X, dense_output=True) - weight_sum * np.outer(X_mean, X_mean), X_mean, )
_RidgeGCV._compute_covariance
scikit-learn
207
sklearn/linear_model/_ridge.py
def _compute_gram(self, X, sqrt_sw): """Computes the Gram matrix XX^T with possible centering. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- gram : ndarray of shape (n_samples, n_samples) The Gram matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of ``X`` for each feature. Notes ----- When X is dense the centering has been done in preprocessing so the mean is 0 and we just compute XX^T. When X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. The centered X is never actually computed because centering would break the sparsity of X. """
/usr/src/app/target_test_cases/failed_tests__RidgeGCV._compute_gram.txt
def _compute_gram(self, X, sqrt_sw): """Computes the Gram matrix XX^T with possible centering. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The preprocessed design matrix. sqrt_sw : ndarray of shape (n_samples,) square roots of sample weights Returns ------- gram : ndarray of shape (n_samples, n_samples) The Gram matrix. X_mean : ndarray of shape (n_feature,) The weighted mean of ``X`` for each feature. Notes ----- When X is dense the centering has been done in preprocessing so the mean is 0 and we just compute XX^T. When X is sparse it has not been centered in preprocessing, but it has been scaled by sqrt(sample weights). When self.fit_intercept is False no centering is done. The centered X is never actually computed because centering would break the sparsity of X. """ center = self.fit_intercept and sparse.issparse(X) if not center: # in this case centering has been done in preprocessing # or we are not fitting an intercept. X_mean = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X, X.T, dense_output=True), X_mean # X is sparse n_samples = X.shape[0] sample_weight_matrix = sparse.dia_matrix( (sqrt_sw, 0), shape=(n_samples, n_samples) ) X_weighted = sample_weight_matrix.dot(X) X_mean, _ = mean_variance_axis(X_weighted, axis=0) X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) X_mX = sqrt_sw[:, None] * safe_sparse_dot(X_mean, X.T, dense_output=True) X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean) return ( safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, X_mean, )
_RidgeGCV._compute_gram
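The two _RidgeGCV helpers above serve the efficient leave-one-out search in the public RidgeCV; an illustrative sketch of that entry point (not part of the dataset rows) is:

# Hedged sketch: RidgeCV with cv=None uses the generalized (leave-one-out) CV path.
from sklearn.datasets import load_diabetes
from sklearn.linear_model import RidgeCV

X, y = load_diabetes(return_X_y=True)
model = RidgeCV(alphas=[0.1, 1.0, 10.0]).fit(X, y)
print(model.alpha_, model.best_score_)      # selected penalty and its CV score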
scikit-learn
208
sklearn/utils/validation.py
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9): """Check allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : {array-like, sparse matrix} First array to compare. y : {array-like, sparse matrix} Second array to compare. rtol : float, default=1e-7 Relative tolerance; see numpy.allclose. atol : float, default=1e-9 absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0. """
/usr/src/app/target_test_cases/failed_tests__allclose_dense_sparse.txt
def _allclose_dense_sparse(x, y, rtol=1e-7, atol=1e-9): """Check allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : {array-like, sparse matrix} First array to compare. y : {array-like, sparse matrix} Second array to compare. rtol : float, default=1e-7 Relative tolerance; see numpy.allclose. atol : float, default=1e-9 absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0. """ if sp.issparse(x) and sp.issparse(y): x = x.tocsr() y = y.tocsr() x.sum_duplicates() y.sum_duplicates() return ( np.array_equal(x.indices, y.indices) and np.array_equal(x.indptr, y.indptr) and np.allclose(x.data, y.data, rtol=rtol, atol=atol) ) elif not sp.issparse(x) and not sp.issparse(y): return np.allclose(x, y, rtol=rtol, atol=atol) raise ValueError( "Can only compare two sparse matrices, not a sparse matrix and an array" )
_allclose_dense_sparse
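Illustrative sketch for the record above (not part of the dataset row); the helper is private, so the import path below simply mirrors the module listed in the record.

# Hedged sketch: compare two CSR matrices within tolerance.
import numpy as np
from scipy import sparse
from sklearn.utils.validation import _allclose_dense_sparse

A = sparse.csr_matrix(np.eye(3))
B = sparse.csr_matrix(np.eye(3)) * (1.0 + 1e-12)   # tiny relative perturbation
print(_allclose_dense_sparse(A, B))         # True: identical structure, data within rtol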
scikit-learn
209
sklearn/decomposition/_pca.py
def _assess_dimension(spectrum, rank, n_samples): """Compute the log-likelihood of a rank ``rank`` dataset. The dataset is assumed to be embedded in gaussian noise of shape(n, dimf) having spectrum ``spectrum``. This implements the method of T. P. Minka. Parameters ---------- spectrum : ndarray of shape (n_features,) Data spectrum. rank : int Tested rank value. It should be strictly lower than n_features, otherwise the method isn't specified (division by zero in equation (31) from the paper). n_samples : int Number of samples. Returns ------- ll : float The log-likelihood. References ---------- This implements the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604 <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_ """
/usr/src/app/target_test_cases/failed_tests__assess_dimension.txt
def _assess_dimension(spectrum, rank, n_samples): """Compute the log-likelihood of a rank ``rank`` dataset. The dataset is assumed to be embedded in gaussian noise of shape(n, dimf) having spectrum ``spectrum``. This implements the method of T. P. Minka. Parameters ---------- spectrum : ndarray of shape (n_features,) Data spectrum. rank : int Tested rank value. It should be strictly lower than n_features, otherwise the method isn't specified (division by zero in equation (31) from the paper). n_samples : int Number of samples. Returns ------- ll : float The log-likelihood. References ---------- This implements the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604 <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_ """ xp, _ = get_namespace(spectrum) n_features = spectrum.shape[0] if not 1 <= rank < n_features: raise ValueError("the tested rank should be in [1, n_features - 1]") eps = 1e-15 if spectrum[rank - 1] < eps: # When the tested rank is associated with a small eigenvalue, there's # no point in computing the log-likelihood: it's going to be very # small and won't be the max anyway. Also, it can lead to numerical # issues below when computing pa, in particular in log((spectrum[i] - # spectrum[j]) because this will take the log of something very small. return -xp.inf pu = -rank * log(2.0) for i in range(1, rank + 1): pu += ( gammaln((n_features - i + 1) / 2.0) - log(xp.pi) * (n_features - i + 1) / 2.0 ) pl = xp.sum(xp.log(spectrum[:rank])) pl = -pl * n_samples / 2.0 v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank)) pv = -log(v) * n_samples * (n_features - rank) / 2.0 m = n_features * rank - rank * (rank + 1.0) / 2.0 pp = log(2.0 * xp.pi) * (m + rank) / 2.0 pa = 0.0 spectrum_ = xp.asarray(spectrum, copy=True) spectrum_[rank:n_features] = v for i in range(rank): for j in range(i + 1, spectrum.shape[0]): pa += log( (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]) ) + log(n_samples) ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0 return ll
_assess_dimension
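_assess_dimension above backs Minka's MLE choice of dimensionality; an illustrative sketch of the public entry point, PCA with n_components='mle' (not part of the dataset row), is:

# Hedged sketch: let PCA pick the rank by maximizing this log-likelihood.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 10))
X[:, 5:] = X[:, :5] + 0.01 * rng.normal(size=(200, 5))   # make 5 directions nearly redundant
pca = PCA(n_components="mle", svd_solver="full").fit(X)
print(pca.n_components_)                    # rank selected by the likelihood above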
scikit-learn
210
sklearn/metrics/_base.py
def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None): """Average a binary metric for multilabel classification. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. sample_weight : array-like of shape (n_samples,), default=None Sample weights. binary_metric : callable, returns shape [n_classes] The binary metric function to use. Returns ------- score : float or array of shape [n_classes] If not ``None``, average the score, else return the score for each classes. """
/usr/src/app/target_test_cases/failed_tests__average_binary_score.txt
def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None): """Average a binary metric for multilabel classification. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. sample_weight : array-like of shape (n_samples,), default=None Sample weights. binary_metric : callable, returns shape [n_classes] The binary metric function to use. Returns ------- score : float or array of shape [n_classes] If not ``None``, average the score, else return the score for each classes. """ average_options = (None, "micro", "macro", "weighted", "samples") if average not in average_options: raise ValueError("average has to be one of {0}".format(average_options)) y_type = type_of_target(y_true) if y_type not in ("binary", "multilabel-indicator"): raise ValueError("{0} format is not supported".format(y_type)) if y_type == "binary": return binary_metric(y_true, y_score, sample_weight=sample_weight) check_consistent_length(y_true, y_score, sample_weight) y_true = check_array(y_true) y_score = check_array(y_score) not_average_axis = 1 score_weight = sample_weight average_weight = None if average == "micro": if score_weight is not None: score_weight = np.repeat(score_weight, y_true.shape[1]) y_true = y_true.ravel() y_score = y_score.ravel() elif average == "weighted": if score_weight is not None: average_weight = np.sum( np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0 ) else: average_weight = np.sum(y_true, axis=0) if np.isclose(average_weight.sum(), 0.0): return 0 elif average == "samples": # swap average_weight <-> score_weight average_weight = score_weight score_weight = None not_average_axis = 0 if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_score.ndim == 1: y_score = y_score.reshape((-1, 1)) n_classes = y_score.shape[not_average_axis] score = np.zeros((n_classes,)) for c in range(n_classes): y_true_c = y_true.take([c], axis=not_average_axis).ravel() y_score_c = y_score.take([c], axis=not_average_axis).ravel() score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight) # Average the results if average is not None: if average_weight is not None: # Scores with 0 weights are forced to be 0, preventing the average # score from being affected by 0-weighted NaN elements. average_weight = np.asarray(average_weight) score[average_weight == 0] = 0 return np.average(score, weights=average_weight) else: return score
_average_binary_score
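Illustrative sketch for the record above (not part of the dataset row): multilabel ROC AUC is one of the metrics averaged through this helper.

# Hedged sketch: macro and per-label averaging on a multilabel problem.
import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1]])
y_score = np.array([[0.9, 0.2, 0.8], [0.1, 0.7, 0.3], [0.8, 0.6, 0.1], [0.2, 0.1, 0.7]])
print(roc_auc_score(y_true, y_score, average="macro"))   # unweighted mean over labels
print(roc_auc_score(y_true, y_score, average=None))      # one score per label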
scikit-learn
211
sklearn/ensemble/_iforest.py
def _average_path_length(n_samples_leaf): """ The average path length in a n_samples iTree, which is equal to the average path length of an unsuccessful BST search since the latter has the same structure as an isolation tree. Parameters ---------- n_samples_leaf : array-like of shape (n_samples,) The number of training samples in each test sample leaf, for each estimators. Returns ------- average_path_length : ndarray of shape (n_samples,) """
/usr/src/app/target_test_cases/failed_tests__average_path_length.txt
def _average_path_length(n_samples_leaf): """ The average path length in a n_samples iTree, which is equal to the average path length of an unsuccessful BST search since the latter has the same structure as an isolation tree. Parameters ---------- n_samples_leaf : array-like of shape (n_samples,) The number of training samples in each test sample leaf, for each estimators. Returns ------- average_path_length : ndarray of shape (n_samples,) """ n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False) n_samples_leaf_shape = n_samples_leaf.shape n_samples_leaf = n_samples_leaf.reshape((1, -1)) average_path_length = np.zeros(n_samples_leaf.shape) mask_1 = n_samples_leaf <= 1 mask_2 = n_samples_leaf == 2 not_mask = ~np.logical_or(mask_1, mask_2) average_path_length[mask_1] = 0.0 average_path_length[mask_2] = 1.0 average_path_length[not_mask] = ( 2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma) - 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask] ) return average_path_length.reshape(n_samples_leaf_shape)
_average_path_length
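_average_path_length above normalizes the anomaly scores of IsolationForest; an illustrative sketch of the public estimator (not part of the dataset row) is:

# Hedged sketch: an obvious outlier should receive a lower (more abnormal) score.
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X = np.r_[rng.normal(size=(100, 2)), [[6.0, 6.0]]]   # 100 inliers plus one outlier
iso = IsolationForest(random_state=0).fit(X)
scores = iso.score_samples(X)
print(scores[-1] < scores[:-1].mean())      # expected True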
scikit-learn
212
sklearn/inspection/_plot/decision_boundary.py
def _check_boundary_response_method(estimator, response_method, class_of_interest): """Validate the response methods to be used with the fitted estimator. Parameters ---------- estimator : object Fitted estimator to check. response_method : {'auto', 'predict_proba', 'decision_function', 'predict'} Specifies whether to use :term:`predict_proba`, :term:`decision_function`, :term:`predict` as the target response. If set to 'auto', the response method is tried in the following order: :term:`decision_function`, :term:`predict_proba`, :term:`predict`. class_of_interest : int, float, bool, str or None The class considered when plotting the decision. Cannot be None if multiclass and `response_method` is 'predict_proba' or 'decision_function'. .. versionadded:: 1.4 Returns ------- prediction_method : list of str or str The name or list of names of the response methods to use. """
/usr/src/app/target_test_cases/failed_tests__check_boundary_response_method.txt
def _check_boundary_response_method(estimator, response_method, class_of_interest): """Validate the response methods to be used with the fitted estimator. Parameters ---------- estimator : object Fitted estimator to check. response_method : {'auto', 'predict_proba', 'decision_function', 'predict'} Specifies whether to use :term:`predict_proba`, :term:`decision_function`, :term:`predict` as the target response. If set to 'auto', the response method is tried in the following order: :term:`decision_function`, :term:`predict_proba`, :term:`predict`. class_of_interest : int, float, bool, str or None The class considered when plotting the decision. Cannot be None if multiclass and `response_method` is 'predict_proba' or 'decision_function'. .. versionadded:: 1.4 Returns ------- prediction_method : list of str or str The name or list of names of the response methods to use. """ has_classes = hasattr(estimator, "classes_") if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]): msg = "Multi-label and multi-output multi-class classifiers are not supported" raise ValueError(msg) if has_classes and len(estimator.classes_) > 2: if response_method not in {"auto", "predict"} and class_of_interest is None: msg = ( "Multiclass classifiers are only supported when `response_method` is " "'predict' or 'auto'. Else you must provide `class_of_interest` to " "plot the decision boundary of a specific class." ) raise ValueError(msg) prediction_method = "predict" if response_method == "auto" else response_method elif response_method == "auto": if is_regressor(estimator): prediction_method = "predict" else: prediction_method = ["decision_function", "predict_proba", "predict"] else: prediction_method = response_method return prediction_method
_check_boundary_response_method
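The 'auto' resolution order described above (decision_function, then predict_proba, then predict for classifiers) can be mimicked with an ordinary attribute check. A hedged sketch of the idea, not the private helper itself:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=50, random_state=0)
clf = LogisticRegression(max_iter=200).fit(X, y)

# First response method that exists on the estimator wins.
for name in ("decision_function", "predict_proba", "predict"):
    if hasattr(clf, name):
        print("would use:", name)  # LogisticRegression -> decision_function
        break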
scikit-learn
213
sklearn/inspection/_pd_utils.py
def _check_feature_names(X, feature_names=None): """Check feature names. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. feature_names : None or array-like of shape (n_names,), dtype=str Feature names to check or `None`. Returns ------- feature_names : list of str Feature names validated. If `feature_names` is `None`, then a list of feature names is provided, i.e. the column names of a pandas dataframe or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a NumPy array. """
/usr/src/app/target_test_cases/failed_tests__check_feature_names.txt
def _check_feature_names(X, feature_names=None): """Check feature names. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. feature_names : None or array-like of shape (n_names,), dtype=str Feature names to check or `None`. Returns ------- feature_names : list of str Feature names validated. If `feature_names` is `None`, then a list of feature names is provided, i.e. the column names of a pandas dataframe or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a NumPy array. """ if feature_names is None: if hasattr(X, "columns") and hasattr(X.columns, "tolist"): # get the column names for a pandas dataframe feature_names = X.columns.tolist() else: # define a list of numbered indices for a numpy array feature_names = [f"x{i}" for i in range(X.shape[1])] elif hasattr(feature_names, "tolist"): # convert numpy array or pandas index to a list feature_names = feature_names.tolist() if len(set(feature_names)) != len(feature_names): raise ValueError("feature_names should not contain duplicates.") return feature_names
_check_feature_names
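A short demonstration of the two naming branches in the helper above (pandas column names versus generated "x0", "x1", ... for plain arrays). It imports the private helper via the module path listed in this record, so treat the import location as version-dependent:

import numpy as np
import pandas as pd
from sklearn.inspection._pd_utils import _check_feature_names

X_arr = np.zeros((3, 2))
print(_check_feature_names(X_arr))                # ['x0', 'x1'] for a NumPy array

X_df = pd.DataFrame(X_arr, columns=["age", "bmi"])
print(_check_feature_names(X_df))                 # ['age', 'bmi'] from the DataFrame
print(_check_feature_names(X_df, ["a", "b"]))     # explicit, duplicate-free names pass through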
scikit-learn
214
sklearn/model_selection/_validation.py
def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray int array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n) """
/usr/src/app/target_test_cases/failed_tests__check_is_permutation.txt
def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray int array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(indices) is np.arange(n) """ if len(indices) != n_samples: return False hit = np.zeros(n_samples, dtype=bool) hit[indices] = True if not np.all(hit): return False return True
_check_is_permutation
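The check amounts to asking whether the index array hits every position of arange(n_samples) exactly once. A sketch re-implementing that test with public NumPy only:

import numpy as np

def is_permutation(indices, n_samples):
    # True iff sorted(indices) == np.arange(n_samples), as in the helper above.
    indices = np.asarray(indices)
    if len(indices) != n_samples:
        return False
    hit = np.zeros(n_samples, dtype=bool)
    hit[indices] = True
    return bool(hit.all())

print(is_permutation(np.random.permutation(5), 5))  # True
print(is_permutation([0, 1, 1, 3, 4], 5))           # False: index 2 is never hit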
scikit-learn
215
sklearn/utils/validation.py
def _check_method_params(X, params, indices=None): """Check and validate the parameters passed to a specific method like `fit`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data array. params : dict Dictionary containing the parameters passed to the method. indices : array-like of shape (n_samples,), default=None Indices to be selected if the parameter has the same size as `X`. Returns ------- method_params_validated : dict Validated parameters. We ensure that the values support indexing. """
/usr/src/app/target_test_cases/failed_tests__check_method_params.txt
def _check_method_params(X, params, indices=None): """Check and validate the parameters passed to a specific method like `fit`. Parameters ---------- X : array-like of shape (n_samples, n_features) Data array. params : dict Dictionary containing the parameters passed to the method. indices : array-like of shape (n_samples,), default=None Indices to be selected if the parameter has the same size as `X`. Returns ------- method_params_validated : dict Validated parameters. We ensure that the values support indexing. """ from . import _safe_indexing method_params_validated = {} for param_key, param_value in params.items(): if ( not _is_arraylike(param_value) and not sp.issparse(param_value) or _num_samples(param_value) != _num_samples(X) ): # Non-indexable pass-through (for now for backward-compatibility). # https://github.com/scikit-learn/scikit-learn/issues/15805 method_params_validated[param_key] = param_value else: # Any other method_params should support indexing # (e.g. for cross-validation). method_params_validated[param_key] = _make_indexable(param_value) method_params_validated[param_key] = _safe_indexing( method_params_validated[param_key], indices ) return method_params_validated
_check_method_params
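In practice this indexing matters when per-sample fit parameters (for example sample_weight) must be sliced to the training fold during cross-validation, while scalar parameters pass through untouched. A hedged sketch of the same idea using only NumPy, not the private helper:

import numpy as np

X = np.arange(20).reshape(10, 2)
fit_params = {"sample_weight": np.linspace(0.1, 1.0, 10), "scale": 3.0}
train_idx = np.array([0, 2, 4, 6, 8])

sliced = {}
for key, value in fit_params.items():
    # Array-likes with one entry per sample are indexed to the fold;
    # anything else (here the scalar 'scale') is passed through as-is.
    if hasattr(value, "__len__") and len(value) == len(X):
        sliced[key] = np.asarray(value)[train_idx]
    else:
        sliced[key] = value

print(sliced["sample_weight"].shape, sliced["scale"])  # (5,) 3.0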
scikit-learn
216
sklearn/metrics/_scorer.py
def _check_multimetric_scoring(estimator, scoring): """Check the scoring parameter in cases when multiple metrics are allowed. In addition, multimetric scoring leverages a caching mechanism to not call the same estimator response method multiple times. Hence, the scorer is modified to only use a single response method given a list of response methods and the estimator. Parameters ---------- estimator : sklearn estimator instance The estimator for which the scoring will be applied. scoring : list, tuple or dict Strategy to evaluate the performance of the cross-validated model on the test set. The possibilities are: - a list or tuple of unique strings; - a callable returning a dictionary where the keys are the metric names and the values are the metric scores; - a dictionary with metric names as keys and callables as values. See :ref:`multimetric_grid_search` for an example. Returns ------- scorers_dict : dict A dict mapping each scorer name to its validated scorer. """
/usr/src/app/target_test_cases/failed_tests__check_multimetric_scoring.txt
def _check_multimetric_scoring(estimator, scoring): """Check the scoring parameter in cases when multiple metrics are allowed. In addition, multimetric scoring leverages a caching mechanism to not call the same estimator response method multiple times. Hence, the scorer is modified to only use a single response method given a list of response methods and the estimator. Parameters ---------- estimator : sklearn estimator instance The estimator for which the scoring will be applied. scoring : list, tuple or dict Strategy to evaluate the performance of the cross-validated model on the test set. The possibilities are: - a list or tuple of unique strings; - a callable returning a dictionary where they keys are the metric names and the values are the metric scores; - a dictionary with metric names as keys and callables a values. See :ref:`multimetric_grid_search` for an example. Returns ------- scorers_dict : dict A dict mapping each scorer name to its validated scorer. """ err_msg_generic = ( f"scoring is invalid (got {scoring!r}). Refer to the " "scoring glossary for details: " "https://scikit-learn.org/stable/glossary.html#term-scoring" ) if isinstance(scoring, (list, tuple, set)): err_msg = ( "The list/tuple elements must be unique strings of predefined scorers. " ) try: keys = set(scoring) except TypeError as e: raise ValueError(err_msg) from e if len(keys) != len(scoring): raise ValueError( f"{err_msg} Duplicate elements were found in" f" the given list. {scoring!r}" ) elif len(keys) > 0: if not all(isinstance(k, str) for k in keys): if any(callable(k) for k in keys): raise ValueError( f"{err_msg} One or more of the elements " "were callables. Use a dict of score " "name mapped to the scorer callable. " f"Got {scoring!r}" ) else: raise ValueError( f"{err_msg} Non-string types were found " f"in the given list. Got {scoring!r}" ) scorers = { scorer: check_scoring(estimator, scoring=scorer) for scorer in scoring } else: raise ValueError(f"{err_msg} Empty list was given. {scoring!r}") elif isinstance(scoring, dict): keys = set(scoring) if not all(isinstance(k, str) for k in keys): raise ValueError( "Non-string types were found in the keys of " f"the given dict. scoring={scoring!r}" ) if len(keys) == 0: raise ValueError(f"An empty dict was passed. {scoring!r}") scorers = { key: check_scoring(estimator, scoring=scorer) for key, scorer in scoring.items() } else: raise ValueError(err_msg_generic) return scorers
_check_multimetric_scoring
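The list and dict forms validated above are the same ones accepted by the public cross_validate API, which is one place this validation is exercised. A small sketch, assuming the iris dataset and two built-in scorer names:

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000)

# A list of unique scorer names is one of the valid multimetric forms.
res = cross_validate(clf, X, y, cv=3, scoring=["accuracy", "f1_macro"])
print(sorted(res.keys()))  # includes 'test_accuracy' and 'test_f1_macro'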
scikit-learn
217
sklearn/neighbors/_base.py
def _check_precomputed(X): """Check precomputed distance matrix. If the precomputed distance matrix is sparse, it checks that the non-zero entries are sorted by distances. If not, the matrix is copied and sorted. Parameters ---------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. Returns ------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. """
/usr/src/app/target_test_cases/failed_tests__check_precomputed.txt
def _check_precomputed(X): """Check precomputed distance matrix. If the precomputed distance matrix is sparse, it checks that the non-zero entries are sorted by distances. If not, the matrix is copied and sorted. Parameters ---------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. Returns ------- X : {sparse matrix, array-like}, (n_samples, n_samples) Distance matrix to other samples. X may be a sparse matrix, in which case only non-zero elements may be considered neighbors. """ if not issparse(X): X = check_array(X, ensure_non_negative=True, input_name="X") return X else: graph = X if graph.format not in ("csr", "csc", "coo", "lil"): raise TypeError( "Sparse matrix in {!r} format is not supported due to " "its handling of explicit zeros".format(graph.format) ) copied = graph.format != "csr" graph = check_array( graph, accept_sparse="csr", ensure_non_negative=True, input_name="precomputed distance matrix", ) graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True) return graph
_check_precomputed
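For sparse precomputed distances, the sorting requirement can be satisfied up front with the public sort_graph_by_row_values utility, which is also what the helper above calls internally. A small sketch building a sparse distance graph whose rows are deliberately unsorted by value:

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.neighbors import sort_graph_by_row_values

# 3 samples; each row stores distances to its neighbors, unsorted on purpose.
graph = csr_matrix(np.array([[0.0, 3.0, 1.0],
                             [3.0, 0.0, 2.0],
                             [1.0, 2.0, 0.0]]))

sorted_graph = sort_graph_by_row_values(graph, copy=True, warn_when_not_sorted=False)
print(sorted_graph.data)  # each row's non-zero distances now appear in increasing order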
scikit-learn
218
sklearn/utils/validation.py
def _check_psd_eigenvalues(lambdas, enable_warnings=False): """Check the eigenvalues of a positive semidefinite (PSD) matrix. Checks the provided array of PSD matrix eigenvalues for numerical or conditioning issues and returns a fixed validated version. This method should typically be used if the PSD matrix is user-provided (e.g. a Gram matrix) or computed using a user-provided dissimilarity metric (e.g. kernel function), or if the decomposition process uses approximation methods (randomized SVD, etc.). It checks for three things: - that there are no significant imaginary parts in eigenvalues (more than 1e-5 times the maximum real part). If this check fails, it raises a ``ValueError``. Otherwise all non-significant imaginary parts that may remain are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. - that eigenvalues are not all negative. If this check fails, it raises a ``ValueError`` - that there are no significant negative eigenvalues with absolute value more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest positive eigenvalue in double (simple) precision. If this check fails, it raises a ``ValueError``. Otherwise all negative eigenvalues that may remain are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. Finally, all the positive eigenvalues that are too small (with a value smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. Parameters ---------- lambdas : array-like of shape (n_eigenvalues,) Array of eigenvalues to check / fix. enable_warnings : bool, default=False When this is set to ``True``, a ``PositiveSpectrumWarning`` will be raised when there are imaginary parts, negative eigenvalues, or extremely small non-zero eigenvalues. Otherwise no warning will be raised. In both cases, imaginary parts, negative eigenvalues, and extremely small non-zero eigenvalues will be set to zero. Returns ------- lambdas_fixed : ndarray of shape (n_eigenvalues,) A fixed validated copy of the array of eigenvalues. Examples -------- >>> from sklearn.utils.validation import _check_psd_eigenvalues >>> _check_psd_eigenvalues([1, 2]) # nominal case array([1, 2]) >>> _check_psd_eigenvalues([5, 5j]) # significant imag part Traceback (most recent call last): ... ValueError: There are significant imaginary parts in eigenvalues (1 of the maximum real part). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part array([5., 0.]) >>> _check_psd_eigenvalues([-5, -1]) # all negative Traceback (most recent call last): ... ValueError: All eigenvalues are negative (maximum is -1). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, -1]) # significant negative Traceback (most recent call last): ... ValueError: There are significant negative eigenvalues (0.2 of the maximum positive). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative array([5., 0.]) >>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small) array([5., 0.]) """
/usr/src/app/target_test_cases/failed_tests__check_psd_eigenvalues.txt
def _check_psd_eigenvalues(lambdas, enable_warnings=False): """Check the eigenvalues of a positive semidefinite (PSD) matrix. Checks the provided array of PSD matrix eigenvalues for numerical or conditioning issues and returns a fixed validated version. This method should typically be used if the PSD matrix is user-provided (e.g. a Gram matrix) or computed using a user-provided dissimilarity metric (e.g. kernel function), or if the decomposition process uses approximation methods (randomized SVD, etc.). It checks for three things: - that there are no significant imaginary parts in eigenvalues (more than 1e-5 times the maximum real part). If this check fails, it raises a ``ValueError``. Otherwise all non-significant imaginary parts that may remain are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. - that eigenvalues are not all negative. If this check fails, it raises a ``ValueError`` - that there are no significant negative eigenvalues with absolute value more than 1e-10 (1e-6) and more than 1e-5 (5e-3) times the largest positive eigenvalue in double (simple) precision. If this check fails, it raises a ``ValueError``. Otherwise all negative eigenvalues that may remain are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. Finally, all the positive eigenvalues that are too small (with a value smaller than the maximum eigenvalue multiplied by 1e-12 (2e-7)) are set to zero. This operation is traced with a ``PositiveSpectrumWarning`` when ``enable_warnings=True``. Parameters ---------- lambdas : array-like of shape (n_eigenvalues,) Array of eigenvalues to check / fix. enable_warnings : bool, default=False When this is set to ``True``, a ``PositiveSpectrumWarning`` will be raised when there are imaginary parts, negative eigenvalues, or extremely small non-zero eigenvalues. Otherwise no warning will be raised. In both cases, imaginary parts, negative eigenvalues, and extremely small non-zero eigenvalues will be set to zero. Returns ------- lambdas_fixed : ndarray of shape (n_eigenvalues,) A fixed validated copy of the array of eigenvalues. Examples -------- >>> from sklearn.utils.validation import _check_psd_eigenvalues >>> _check_psd_eigenvalues([1, 2]) # nominal case array([1, 2]) >>> _check_psd_eigenvalues([5, 5j]) # significant imag part Traceback (most recent call last): ... ValueError: There are significant imaginary parts in eigenvalues (1 of the maximum real part). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, 5e-5j]) # insignificant imag part array([5., 0.]) >>> _check_psd_eigenvalues([-5, -1]) # all negative Traceback (most recent call last): ... ValueError: All eigenvalues are negative (maximum is -1). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. >>> _check_psd_eigenvalues([5, -1]) # significant negative Traceback (most recent call last): ... ValueError: There are significant negative eigenvalues (0.2 of the maximum positive). Either the matrix is not PSD, or there was an issue while computing the eigendecomposition of the matrix. 
>>> _check_psd_eigenvalues([5, -5e-5]) # insignificant negative array([5., 0.]) >>> _check_psd_eigenvalues([5, 4e-12]) # bad conditioning (too small) array([5., 0.]) """ lambdas = np.array(lambdas) is_double_precision = lambdas.dtype == np.float64 # note: the minimum value available is # - single-precision: np.finfo('float32').eps = 1.2e-07 # - double-precision: np.finfo('float64').eps = 2.2e-16 # the various thresholds used for validation # we may wish to change the value according to precision. significant_imag_ratio = 1e-5 significant_neg_ratio = 1e-5 if is_double_precision else 5e-3 significant_neg_value = 1e-10 if is_double_precision else 1e-6 small_pos_ratio = 1e-12 if is_double_precision else 2e-7 # Check that there are no significant imaginary parts if not np.isreal(lambdas).all(): max_imag_abs = np.abs(np.imag(lambdas)).max() max_real_abs = np.abs(np.real(lambdas)).max() if max_imag_abs > significant_imag_ratio * max_real_abs: raise ValueError( "There are significant imaginary parts in eigenvalues (%g " "of the maximum real part). Either the matrix is not PSD, or " "there was an issue while computing the eigendecomposition " "of the matrix." % (max_imag_abs / max_real_abs) ) # warn about imaginary parts being removed if enable_warnings: warnings.warn( "There are imaginary parts in eigenvalues (%g " "of the maximum real part). Either the matrix is not" " PSD, or there was an issue while computing the " "eigendecomposition of the matrix. Only the real " "parts will be kept." % (max_imag_abs / max_real_abs), PositiveSpectrumWarning, ) # Remove all imaginary parts (even if zero) lambdas = np.real(lambdas) # Check that there are no significant negative eigenvalues max_eig = lambdas.max() if max_eig < 0: raise ValueError( "All eigenvalues are negative (maximum is %g). " "Either the matrix is not PSD, or there was an " "issue while computing the eigendecomposition of " "the matrix." % max_eig ) else: min_eig = lambdas.min() if ( min_eig < -significant_neg_ratio * max_eig and min_eig < -significant_neg_value ): raise ValueError( "There are significant negative eigenvalues (%g" " of the maximum positive). Either the matrix is " "not PSD, or there was an issue while computing " "the eigendecomposition of the matrix." % (-min_eig / max_eig) ) elif min_eig < 0: # Remove all negative values and warn about it if enable_warnings: warnings.warn( "There are negative eigenvalues (%g of the " "maximum positive). Either the matrix is not " "PSD, or there was an issue while computing the" " eigendecomposition of the matrix. Negative " "eigenvalues will be replaced with 0." % (-min_eig / max_eig), PositiveSpectrumWarning, ) lambdas[lambdas < 0] = 0 # Check for conditioning (small positive non-zeros) too_small_lambdas = (0 < lambdas) & (lambdas < small_pos_ratio * max_eig) if too_small_lambdas.any(): if enable_warnings: warnings.warn( "Badly conditioned PSD matrix spectrum: the largest " "eigenvalue is more than %g times the smallest. " "Small eigenvalues will be replaced with 0." "" % (1 / small_pos_ratio), PositiveSpectrumWarning, ) lambdas[too_small_lambdas] = 0 return lambdas
_check_psd_eigenvalues
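The clean-up rule described above (zero out insignificant negative eigenvalues and very small positives, reject significant negatives) can be reproduced with plain NumPy using the double-precision thresholds quoted in the docstring. A minimal sketch of that rule, not the sklearn helper itself, and omitting the imaginary-part handling:

import numpy as np

def clean_psd_spectrum(lambdas, neg_ratio=1e-5, neg_value=1e-10, small_ratio=1e-12):
    # Mirrors the double-precision branch described above.
    lambdas = np.array(lambdas, dtype=float)
    max_eig = lambdas.max()
    if max_eig < 0:
        raise ValueError("all eigenvalues are negative")
    if lambdas.min() < -neg_ratio * max_eig and lambdas.min() < -neg_value:
        raise ValueError("significant negative eigenvalues")
    lambdas[lambdas < 0] = 0.0                                     # drop tiny negatives
    lambdas[(lambdas > 0) & (lambdas < small_ratio * max_eig)] = 0.0  # drop badly conditioned positives
    return lambdas

print(clean_psd_spectrum([5.0, -5e-5, 4e-12]))  # [5. 0. 0.]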
scikit-learn
219
sklearn/utils/validation.py
def _check_response_method(estimator, response_method): """Check if `response_method` is available in estimator and return it. .. versionadded:: 1.3 Parameters ---------- estimator : estimator instance Classifier or regressor to check. response_method : {"predict_proba", "predict_log_proba", "decision_function", "predict"} or list of such str Specifies the response method to use to get a prediction from an estimator (i.e. :term:`predict_proba`, :term:`predict_log_proba`, :term:`decision_function` or :term:`predict`). Possible choices are: - if `str`, it corresponds to the name of the method to return; - if a list of `str`, it provides the method names in order of preference. The method returned corresponds to the first method in the list that is implemented by `estimator`. Returns ------- prediction_method : callable Prediction method of estimator. Raises ------ AttributeError If `response_method` is not available in `estimator`. """
/usr/src/app/target_test_cases/failed_tests__check_response_method.txt
def _check_response_method(estimator, response_method): """Check if `response_method` is available in estimator and return it. .. versionadded:: 1.3 Parameters ---------- estimator : estimator instance Classifier or regressor to check. response_method : {"predict_proba", "predict_log_proba", "decision_function", "predict"} or list of such str Specifies the response method to use get prediction from an estimator (i.e. :term:`predict_proba`, :term:`predict_log_proba`, :term:`decision_function` or :term:`predict`). Possible choices are: - if `str`, it corresponds to the name to the method to return; - if a list of `str`, it provides the method names in order of preference. The method returned corresponds to the first method in the list and which is implemented by `estimator`. Returns ------- prediction_method : callable Prediction method of estimator. Raises ------ AttributeError If `response_method` is not available in `estimator`. """ if isinstance(response_method, str): list_methods = [response_method] else: list_methods = response_method prediction_method = [getattr(estimator, method, None) for method in list_methods] prediction_method = reduce(lambda x, y: x or y, prediction_method) if prediction_method is None: raise AttributeError( f"{estimator.__class__.__name__} has none of the following attributes: " f"{', '.join(list_methods)}." ) return prediction_method
_check_response_method
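The preference-list behaviour shown in the body above boils down to a getattr chain where the first available method wins. A sketch re-implementing that idea on an ordinary estimator rather than importing the private helper:

from functools import reduce
from sklearn.linear_model import LinearRegression

est = LinearRegression()
methods = ["predict_proba", "decision_function", "predict"]

# First attribute that exists wins; LinearRegression only offers 'predict'.
candidates = [getattr(est, m, None) for m in methods]
chosen = reduce(lambda x, y: x or y, candidates)
print(chosen.__name__ if chosen is not None else "none available")  # 'predict'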
scikit-learn
220
sklearn/utils/validation.py
def _check_sample_weight( sample_weight, X, dtype=None, copy=False, ensure_non_negative=False ): """Validate sample weights. Note that passing sample_weight=None will output an array of ones. Therefore, in some cases, you may want to protect the call with: if sample_weight is not None: sample_weight = _check_sample_weight(...) Parameters ---------- sample_weight : {ndarray, Number or None}, shape (n_samples,) Input sample weights. X : {ndarray, list, sparse matrix} Input data. ensure_non_negative : bool, default=False Whether or not the weights are expected to be non-negative. .. versionadded:: 1.0 dtype : dtype, default=None dtype of the validated `sample_weight`. If None, and the input `sample_weight` is an array, the dtype of the input is preserved; otherwise an array with the default numpy dtype is allocated. If `dtype` is not one of `float32`, `float64`, `None`, the output will be of dtype `float64`. copy : bool, default=False If True, a copy of sample_weight will be created. Returns ------- sample_weight : ndarray of shape (n_samples,) Validated sample weight. It is guaranteed to be "C" contiguous. """
/usr/src/app/target_test_cases/failed_tests__check_sample_weight.txt
def _check_sample_weight( sample_weight, X, dtype=None, copy=False, ensure_non_negative=False ): """Validate sample weights. Note that passing sample_weight=None will output an array of ones. Therefore, in some cases, you may want to protect the call with: if sample_weight is not None: sample_weight = _check_sample_weight(...) Parameters ---------- sample_weight : {ndarray, Number or None}, shape (n_samples,) Input sample weights. X : {ndarray, list, sparse matrix} Input data. ensure_non_negative : bool, default=False, Whether or not the weights are expected to be non-negative. .. versionadded:: 1.0 dtype : dtype, default=None dtype of the validated `sample_weight`. If None, and the input `sample_weight` is an array, the dtype of the input is preserved; otherwise an array with the default numpy dtype is be allocated. If `dtype` is not one of `float32`, `float64`, `None`, the output will be of dtype `float64`. copy : bool, default=False If True, a copy of sample_weight will be created. Returns ------- sample_weight : ndarray of shape (n_samples,) Validated sample weight. It is guaranteed to be "C" contiguous. """ n_samples = _num_samples(X) if dtype is not None and dtype not in [np.float32, np.float64]: dtype = np.float64 if sample_weight is None: sample_weight = np.ones(n_samples, dtype=dtype) elif isinstance(sample_weight, numbers.Number): sample_weight = np.full(n_samples, sample_weight, dtype=dtype) else: if dtype is None: dtype = [np.float64, np.float32] sample_weight = check_array( sample_weight, accept_sparse=False, ensure_2d=False, dtype=dtype, order="C", copy=copy, input_name="sample_weight", ) if sample_weight.ndim != 1: raise ValueError("Sample weights must be 1D array or scalar") if sample_weight.shape != (n_samples,): raise ValueError( "sample_weight.shape == {}, expected {}!".format( sample_weight.shape, (n_samples,) ) ) if ensure_non_negative: check_non_negative(sample_weight, "`sample_weight`") return sample_weight
_check_sample_weight
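A short sketch of the three input cases the helper normalises (None, scalar, array), importing it via the module path given in this record; as a private utility its location may change between releases:

import numpy as np
from sklearn.utils.validation import _check_sample_weight

X = np.zeros((4, 2))
print(_check_sample_weight(None, X))           # array of ones, shape (4,)
print(_check_sample_weight(2.0, X))            # scalar broadcast to shape (4,)
print(_check_sample_weight([1, 2, 3, 4], X))   # validated 1D float array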
scikit-learn
221
sklearn/metrics/_classification.py
def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task. This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target``. y_true : array or indicator matrix y_pred : array or indicator matrix """
/usr/src/app/target_test_cases/failed_tests__check_targets.txt
def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task. This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target``. y_true : array or indicator matrix y_pred : array or indicator matrix """ xp, _ = get_namespace(y_true, y_pred) check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true, input_name="y_true") type_pred = type_of_target(y_pred, input_name="y_pred") y_type = {type_true, type_pred} if y_type == {"binary", "multiclass"}: y_type = {"multiclass"} if len(y_type) > 1: raise ValueError( "Classification metrics can't handle a mix of {0} and {1} targets".format( type_true, type_pred ) ) # We can't have more than one value on y_type => The set is no more needed y_type = y_type.pop() # No metrics support "multiclass-multioutput" format if y_type not in ["binary", "multiclass", "multilabel-indicator"]: raise ValueError("{0} is not supported".format(y_type)) if y_type in ["binary", "multiclass"]: xp, _ = get_namespace(y_true, y_pred) y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type == "binary": try: unique_values = _union1d(y_true, y_pred, xp) except TypeError as e: # We expect y_true and y_pred to be of the same data type. # If `y_true` was provided to the classifier as strings, # `y_pred` given by the classifier will also be encoded with # strings. So we raise a meaningful error raise TypeError( "Labels in y_true and y_pred should be of the same type. " f"Got y_true={xp.unique(y_true)} and " f"y_pred={xp.unique(y_pred)}. Make sure that the " "predictions provided by the classifier coincides with " "the true labels." ) from e if unique_values.shape[0] > 2: y_type = "multiclass" if y_type.startswith("multilabel"): if _is_numpy_namespace(xp): # XXX: do we really want to sparse-encode multilabel indicators when # they are passed as a dense arrays? This is not possible for array # API inputs in general hence we only do it for NumPy inputs. But even # for NumPy the usefulness is questionable. y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = "multilabel-indicator" return y_type, y_true, y_pred
_check_targets
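The target-type inference that drives this check is exposed publicly as type_of_target. A small sketch of the cases the helper distinguishes and the one it rejects:

import numpy as np
from sklearn.utils.multiclass import type_of_target

print(type_of_target([0, 1, 1, 0]))                 # 'binary'
print(type_of_target([0, 1, 2, 1]))                 # 'multiclass'
print(type_of_target(np.array([[1, 0], [0, 1]])))   # 'multilabel-indicator'
print(type_of_target([0.5, 1.2]))                   # 'continuous' -> rejected by _check_targets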