repository (stringclasses, 11 values) | repo_id (stringlengths 1-3) | target_module_path (stringlengths 16-72) | prompt (stringlengths 298-21.7k) | relavent_test_path (stringlengths 50-99) | full_function (stringlengths 336-33.8k) | function_name (stringlengths 2-51)
---|---|---|---|---|---|---|
scikit-learn | 22 | sklearn/compose/_column_transformer.py | def fit_transform(self, X, y=None, **params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,), default=None
Targets for supervised learning.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``fit`` and
``transform`` methods.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
| /usr/src/app/target_test_cases/failed_tests_ColumnTransformer.fit_transform.txt | def fit_transform(self, X, y=None, **params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
Input data, of which specified subsets are used to fit the
transformers.
y : array-like of shape (n_samples,), default=None
Targets for supervised learning.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``fit`` and
``transform`` methods.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
_raise_for_params(params, self, "fit_transform")
_check_feature_names(self, X, reset=True)
X = _check_X(X)
# set n_features_in_ attribute
_check_n_features(self, X, reset=True)
self._validate_transformers()
n_samples = _num_samples(X)
self._validate_column_callables(X)
self._validate_remainder(X)
if _routing_enabled():
routed_params = process_routing(self, "fit_transform", **params)
else:
routed_params = self._get_empty_routing()
result = self._call_func_on_transformers(
X,
y,
_fit_transform_one,
column_as_labels=False,
routed_params=routed_params,
)
if not result:
self._update_fitted_transformers([])
# All transformers are None
return np.zeros((n_samples, 0))
Xs, transformers = zip(*result)
# determine if concatenated output will be sparse or not
if any(sparse.issparse(X) for X in Xs):
nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
total = sum(
X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs
)
density = nnz / total
self.sparse_output_ = density < self.sparse_threshold
else:
self.sparse_output_ = False
self._update_fitted_transformers(transformers)
self._validate_output(Xs)
self._record_output_indices(Xs)
return self._hstack(list(Xs), n_samples=n_samples)
| ColumnTransformer.fit_transform |
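A minimal usage sketch for `ColumnTransformer.fit_transform` (the data and transformer choices are illustrative, not taken from the dataset; `sparse_output` assumes scikit-learn >= 1.2):
```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

# Illustrative data: one numeric and one categorical column.
X = pd.DataFrame({"age": [20, 35, 50, 65], "city": ["NY", "SF", "NY", "LA"]})

ct = ColumnTransformer(
    transformers=[
        ("num", StandardScaler(), ["age"]),
        ("cat", OneHotEncoder(sparse_output=False), ["city"]),
    ]
)
Xt = ct.fit_transform(X)  # horizontally stacked outputs of both transformers
print(Xt.shape)           # (4, 1 + 3): scaled age plus three one-hot columns
```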
scikit-learn | 23 | sklearn/compose/_column_transformer.py | def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
| /usr/src/app/target_test_cases/failed_tests_ColumnTransformer.get_feature_names_out.txt | def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self)
input_features = _check_feature_names_in(self, input_features)
# List of tuples (name, feature_names_out)
transformer_with_feature_names_out = []
for name, trans, *_ in self._iter(
fitted=True,
column_as_labels=False,
skip_empty_columns=True,
skip_drop=True,
):
feature_names_out = self._get_feature_name_out_for_transformer(
name, trans, input_features
)
if feature_names_out is None:
continue
transformer_with_feature_names_out.append((name, feature_names_out))
if not transformer_with_feature_names_out:
# No feature names
return np.array([], dtype=object)
return self._add_prefix_for_feature_names_out(
transformer_with_feature_names_out
)
| ColumnTransformer.get_feature_names_out |
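A short sketch of `get_feature_names_out` on the same kind of setup (illustrative data); output names are prefixed with the transformer name:
```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X = pd.DataFrame({"age": [20, 35, 50], "city": ["NY", "SF", "LA"]})
ct = ColumnTransformer(
    [("num", StandardScaler(), ["age"]), ("cat", OneHotEncoder(), ["city"])]
)
ct.fit(X)
# e.g. ['num__age', 'cat__city_LA', 'cat__city_NY', 'cat__city_SF']
print(ct.get_feature_names_out())
```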
scikit-learn | 24 | sklearn/compose/_column_transformer.py | def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
| /usr/src/app/target_test_cases/failed_tests_ColumnTransformer.get_metadata_routing.txt | def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
# Here we don't care about which columns are used for which
# transformers, and whether or not a transformer is used at all, which
# might happen if no columns are selected for that transformer. We
# request all metadata requested by all transformers.
transformers = chain(self.transformers, [("remainder", self.remainder, None)])
for name, step, _ in transformers:
method_mapping = MethodMapping()
if hasattr(step, "fit_transform"):
(
method_mapping.add(caller="fit", callee="fit_transform").add(
caller="fit_transform", callee="fit_transform"
)
)
else:
(
method_mapping.add(caller="fit", callee="fit")
.add(caller="fit", callee="transform")
.add(caller="fit_transform", callee="fit")
.add(caller="fit_transform", callee="transform")
)
method_mapping.add(caller="transform", callee="transform")
router.add(method_mapping=method_mapping, **{name: step})
return router
| ColumnTransformer.get_metadata_routing |
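A hedged sketch of how the routing above is used in practice, assuming scikit-learn >= 1.4 with metadata routing enabled; the sub-transformer must explicitly request the metadata it wants:
```python
import numpy as np
import sklearn
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

sklearn.set_config(enable_metadata_routing=True)

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
w = np.array([1.0, 1.0, 5.0])

# StandardScaler.fit accepts sample_weight; requesting it lets the
# ColumnTransformer route the metadata passed to fit_transform.
scaler = StandardScaler().set_fit_request(sample_weight=True)
ct = ColumnTransformer([("scale", scaler, [0, 1])])

ct.fit_transform(X, sample_weight=w)
print(ct.get_metadata_routing())  # MetadataRouter describing the routing

sklearn.set_config(enable_metadata_routing=False)
```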
scikit-learn | 25 | sklearn/compose/_column_transformer.py | def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `transformers`
and `transformers_`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.4
`"polars"` option was added.
Returns
-------
self : estimator instance
Estimator instance.
"""
| /usr/src/app/target_test_cases/failed_tests_ColumnTransformer.set_output.txt | def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `transformers`
and `transformers_`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.4
`"polars"` option was added.
Returns
-------
self : estimator instance
Estimator instance.
"""
super().set_output(transform=transform)
transformers = (
trans
for _, trans, _ in chain(
self.transformers, getattr(self, "transformers_", [])
)
if trans not in {"passthrough", "drop"}
)
for trans in transformers:
_safe_set_output(trans, transform=transform)
if self.remainder not in {"passthrough", "drop"}:
_safe_set_output(self.remainder, transform=transform)
return self
| ColumnTransformer.set_output |
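A minimal sketch of `set_output` (illustrative data; assumes scikit-learn >= 1.2, where the `set_output` API was introduced):
```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

X = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
ct = ColumnTransformer([("scale", StandardScaler(), ["a", "b"])])

# The request is propagated to every sub-transformer, so the concatenated
# result comes back as a pandas DataFrame instead of a numpy array.
ct.set_output(transform="pandas")
print(type(ct.fit_transform(X)))  # <class 'pandas.core.frame.DataFrame'>
```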
scikit-learn | 26 | sklearn/compose/_column_transformer.py | def transform(self, X, **params):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be transformed by subset.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``transform``
method.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
| /usr/src/app/target_test_cases/failed_tests_ColumnTransformer.transform.txt | def transform(self, X, **params):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : {array-like, dataframe} of shape (n_samples, n_features)
The data to be transformed by subset.
**params : dict, default=None
Parameters to be passed to the underlying transformers' ``transform``
method.
You can only pass this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.4
Returns
-------
X_t : {array-like, sparse matrix} of \
shape (n_samples, sum_n_components)
Horizontally stacked results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers. If
any result is a sparse matrix, everything will be converted to
sparse matrices.
"""
_raise_for_params(params, self, "transform")
check_is_fitted(self)
X = _check_X(X)
# If ColumnTransformer is fit using a dataframe, and now a dataframe is
# passed to be transformed, we select columns by name instead. This
# enables the user to pass X at transform time with extra columns which
# were not present in fit time, and the order of the columns doesn't
# matter.
fit_dataframe_and_transform_dataframe = hasattr(self, "feature_names_in_") and (
_is_pandas_df(X) or hasattr(X, "__dataframe__")
)
n_samples = _num_samples(X)
column_names = _get_feature_names(X)
if fit_dataframe_and_transform_dataframe:
named_transformers = self.named_transformers_
# check that all names seen in fit are in transform, unless
# they were dropped
non_dropped_indices = [
ind
for name, ind in self._transformer_to_input_indices.items()
if name in named_transformers and named_transformers[name] != "drop"
]
all_indices = set(chain(*non_dropped_indices))
all_names = set(self.feature_names_in_[ind] for ind in all_indices)
diff = all_names - set(column_names)
if diff:
raise ValueError(f"columns are missing: {diff}")
else:
# ndarray was used for fitting or transforming, thus we only
# check that n_features_in_ is consistent
_check_n_features(self, X, reset=False)
if _routing_enabled():
routed_params = process_routing(self, "transform", **params)
else:
routed_params = self._get_empty_routing()
Xs = self._call_func_on_transformers(
X,
None,
_transform_one,
column_as_labels=fit_dataframe_and_transform_dataframe,
routed_params=routed_params,
)
self._validate_output(Xs)
if not Xs:
# All transformers are None
return np.zeros((n_samples, 0))
return self._hstack(list(Xs), n_samples=n_samples)
| ColumnTransformer.transform |
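A sketch of the column-by-name behavior described in the body above: when fitted on a DataFrame, `transform` can receive a DataFrame with extra columns and a different column order (illustrative data):
```python
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler

X_fit = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
ct = ColumnTransformer([("scale", StandardScaler(), ["a"])], remainder="drop")
ct.fit(X_fit)

# Columns are matched by name: extra columns and a different order are fine,
# but omitting a column that was used at fit time raises a ValueError.
X_new = pd.DataFrame({"extra": [0, 0], "b": [5.0, 6.0], "a": [4.0, 5.0]})
print(ct.transform(X_new))
```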
scikit-learn | 27 | sklearn/cluster/_dbscan.py | def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
self : object
Returns a fitted instance of self.
"""
| /usr/src/app/target_test_cases/failed_tests_DBSCAN.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features, or distance matrix.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``. If a sparse matrix is provided, it will
be converted into a sparse ``csr_matrix``.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with a
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
self : object
Returns a fitted instance of self.
"""
X = validate_data(self, X, accept_sparse="csr")
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# Calculate neighborhood for all samples. This leaves the original
# point in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i. While True, its useless information)
if self.metric == "precomputed" and sparse.issparse(X):
# set the diagonal to explicit values, as a point is its own
# neighbor
X = X.copy() # copy to avoid in-place modification
with warnings.catch_warnings():
warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
X.setdiag(X.diagonal())
neighbors_model = NearestNeighbors(
radius=self.eps,
algorithm=self.algorithm,
leaf_size=self.leaf_size,
metric=self.metric,
metric_params=self.metric_params,
p=self.p,
n_jobs=self.n_jobs,
)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
n_neighbors = np.array(
[np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]
)
# Initially, all samples are noise.
labels = np.full(X.shape[0], -1, dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
self.core_sample_indices_ = np.where(core_samples)[0]
self.labels_ = labels
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
| DBSCAN.fit |
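A small sketch of `DBSCAN.fit` on made-up data showing the fitted attributes set at the end of the method:
```python
import numpy as np
from sklearn.cluster import DBSCAN

# Two tight groups plus one far-away outlier.
X = np.array([[1.0, 1.0], [1.1, 1.0], [0.9, 1.1],
              [8.0, 8.0], [8.1, 7.9], [7.9, 8.1],
              [50.0, 50.0]])

db = DBSCAN(eps=0.5, min_samples=2).fit(X)
print(db.labels_)               # the outlier gets the noise label -1
print(db.core_sample_indices_)  # indices of core samples
print(db.components_.shape)     # copies of the core samples
```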
scikit-learn | 28 | sklearn/tree/_classes.py | def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_DecisionTreeClassifier.fit.txt | def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeClassifier
Fitted estimator.
"""
super()._fit(
X,
y,
sample_weight=sample_weight,
check_input=check_input,
)
return self
| DecisionTreeClassifier.fit |
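An illustrative sketch of `DecisionTreeClassifier.fit` with `sample_weight`, since the docstring emphasizes how weights influence the splits:
```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

# Up-weighting the last sample; zero-weight samples are effectively ignored
# when searching for splits.
w = np.array([1.0, 1.0, 1.0, 5.0])
clf = DecisionTreeClassifier(random_state=0).fit(X, y, sample_weight=w)
print(clf.predict([[1.5]]))
```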
scikit-learn | 29 | sklearn/tree/_classes.py | def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
| /usr/src/app/target_test_cases/failed_tests_DecisionTreeClassifier.predict_proba.txt | def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
such arrays if n_outputs > 1
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
return proba[:, : self.n_classes_]
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, : self.n_classes_[k]]
all_proba.append(proba_k)
return all_proba
| DecisionTreeClassifier.predict_proba |
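A short sketch of `predict_proba`: a shallow tree keeps impure leaves, so the probabilities are class fractions per leaf rather than hard 0/1 values (illustrative parameters):
```python
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

proba = clf.predict_proba(X[:3])
print(proba)              # columns follow clf.classes_
print(proba.sum(axis=1))  # each row sums to 1
```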
scikit-learn | 30 | sklearn/tree/_classes.py | def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_DecisionTreeRegressor.fit.txt | def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you're doing.
Returns
-------
self : DecisionTreeRegressor
Fitted estimator.
"""
super()._fit(
X,
y,
sample_weight=sample_weight,
check_input=check_input,
)
return self
| DecisionTreeRegressor.fit |
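A minimal sketch of `DecisionTreeRegressor.fit` on synthetic data (illustrative values):
```python
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = np.sort(rng.uniform(0, 5, size=(40, 1)), axis=0)
y = np.sin(X).ravel()

reg = DecisionTreeRegressor(max_depth=3, random_state=0).fit(X, y)
print(reg.predict([[1.0], [4.0]]))  # piecewise-constant approximation of sin
```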
scikit-learn | 31 | sklearn/feature_extraction/_dict_vectorizer.py | def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Ignored parameter.
Returns
-------
self : object
DictVectorizer class instance.
"""
| /usr/src/app/target_test_cases/failed_tests_DictVectorizer.fit.txt | def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
.. versionchanged:: 0.24
Accepts multiple string values for one categorical feature.
y : (ignored)
Ignored parameter.
Returns
-------
self : object
DictVectorizer class instance.
"""
feature_names = []
vocab = {}
for x in X:
for f, v in x.items():
if isinstance(v, str):
feature_name = "%s%s%s" % (f, self.separator, v)
elif isinstance(v, Number) or (v is None):
feature_name = f
elif isinstance(v, Mapping):
raise TypeError(
f"Unsupported value type {type(v)} "
f"for {f}: {v}.\n"
"Mapping objects are not supported."
)
elif isinstance(v, Iterable):
feature_name = None
self._add_iterable_element(f, v, feature_names, vocab)
if feature_name is not None:
if feature_name not in vocab:
vocab[feature_name] = len(feature_names)
feature_names.append(feature_name)
if self.sort:
feature_names.sort()
vocab = {f: i for i, f in enumerate(feature_names)}
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
| DictVectorizer.fit |
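A sketch of `DictVectorizer.fit` on a couple of mappings, showing how string values become one-hot features while numeric values keep their own column (illustrative data):
```python
from sklearn.feature_extraction import DictVectorizer

D = [{"city": "NY", "temp": 20.0}, {"city": "SF", "temp": 15.0}]

v = DictVectorizer(sparse=False)
v.fit(D)
# String values produce names like "city=NY" using the default separator "=".
print(v.feature_names_)  # ['city=NY', 'city=SF', 'temp']
print(v.vocabulary_)
print(v.transform(D))
```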
scikit-learn | 32 | sklearn/feature_extraction/_dict_vectorizer.py | def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
"""
| /usr/src/app/target_test_cases/failed_tests_DictVectorizer.inverse_transform.txt | def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample matrix.
dict_type : type, default=dict
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects of shape (n_samples,)
Feature mappings for the samples in X.
"""
check_is_fitted(self, "feature_names_")
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=["csr", "csc"])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in range(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
| DictVectorizer.inverse_transform |
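A round-trip sketch for `inverse_transform` (illustrative data): zero entries are dropped, and one-hot encoded categories come back under their constructed "feature=value" names:
```python
from sklearn.feature_extraction import DictVectorizer

D = [{"foo": 1.0, "bar": 2.0}, {"foo": 3.0, "baz": 1.0}]
v = DictVectorizer(sparse=False)
Xt = v.fit_transform(D)

# Only the nonzero entries of each row are reconstructed; values may print
# as numpy scalars.
print(v.inverse_transform(Xt))
```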
scikit-learn | 33 | sklearn/dummy.py | def fit(self, X, y, sample_weight=None):
"""Fit the baseline classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_DummyClassifier.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the baseline classifier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns the instance itself.
"""
validate_data(self, X, skip_check_array=True)
self._strategy = self.strategy
if self._strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn(
(
"A local copy of the target data has been converted "
"to a numpy array. Predicting on sparse target data "
"with the uniform strategy would not save memory "
"and would be slower."
),
UserWarning,
)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.asarray(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self._strategy == "constant":
if self.constant is None:
raise ValueError(
"Constant target value has to be specified "
"when the constant strategy is used."
)
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError(
"Constant target value should have shape (%d, 1)."
% self.n_outputs_
)
(self.classes_, self.n_classes_, self.class_prior_) = class_distribution(
y, sample_weight
)
if self._strategy == "constant":
for k in range(self.n_outputs_):
if not any(constant[k][0] == c for c in self.classes_[k]):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
err_msg = (
"The constant target value must be present in "
"the training data. You provided constant={}. "
"Possible values are: {}.".format(
self.constant, self.classes_[k].tolist()
)
)
raise ValueError(err_msg)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
| DummyClassifier.fit |
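A minimal sketch of `DummyClassifier.fit` (illustrative data): the features are ignored, and the fit records the classes and their empirical frequencies:
```python
import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((6, 1))            # features are ignored by the baseline
y = np.array([0, 0, 0, 1, 1, 2])

clf = DummyClassifier(strategy="most_frequent").fit(X, y)
print(clf.classes_)      # [0 1 2]
print(clf.class_prior_)  # empirical class frequencies
```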
scikit-learn | 34 | sklearn/dummy.py | def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
| /usr/src/app/target_test_cases/failed_tests_DummyClassifier.predict.txt | def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self._strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == "stratified":
class_prob = class_prior_
elif self._strategy == "uniform":
raise ValueError(
"Sparse target prediction is not "
"supported with the uniform strategy"
)
elif self._strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
else:
if self._strategy in ("most_frequent", "prior"):
y = np.tile(
[
classes_[k][class_prior_[k].argmax()]
for k in range(self.n_outputs_)
],
[n_samples, 1],
)
elif self._strategy == "stratified":
y = np.vstack(
[
classes_[k][proba[k].argmax(axis=1)]
for k in range(self.n_outputs_)
]
).T
elif self._strategy == "uniform":
ret = [
classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)
]
y = np.vstack(ret).T
elif self._strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
| DummyClassifier.predict |
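A sketch of `predict` with the "stratified" strategy (illustrative data): labels are drawn from the empirical class distribution, so about 10% of predictions come out as class 1 here:
```python
import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((100, 1))
y = np.array([0] * 90 + [1] * 10)

clf = DummyClassifier(strategy="stratified", random_state=0).fit(X, y)
pred = clf.predict(np.zeros((1000, 1)))
print(np.bincount(pred) / len(pred))  # roughly [0.9, 0.1]
```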
scikit-learn | 35 | sklearn/dummy.py | def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
| /usr/src/app/target_test_cases/failed_tests_DummyClassifier.predict_proba.txt | def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
return P
| DummyClassifier.predict_proba |
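A sketch of `predict_proba` with the "prior" strategy (illustrative data), which returns the empirical class distribution for every sample:
```python
import numpy as np
from sklearn.dummy import DummyClassifier

X = np.zeros((4, 1))
y = np.array([0, 0, 0, 1])

clf = DummyClassifier(strategy="prior").fit(X, y)
print(clf.predict_proba(np.zeros((2, 1))))
# [[0.75 0.25]
#  [0.75 0.25]]
```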
scikit-learn | 36 | sklearn/dummy.py | def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_DummyRegressor.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Fitted estimator.
"""
validate_data(self, X, skip_check_array=True)
y = check_array(y, ensure_2d=False, input_name="y")
if len(y) == 0:
raise ValueError("y must not be empty.")
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [
_weighted_percentile(y[:, k], sample_weight, percentile=50.0)
for k in range(self.n_outputs_)
]
elif self.strategy == "quantile":
if self.quantile is None:
raise ValueError(
"When using `strategy='quantile', you have to specify the desired "
"quantile in the range [0, 1]."
)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [
_weighted_percentile(y[:, k], sample_weight, percentile=percentile)
for k in range(self.n_outputs_)
]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError(
"Constant target value has to be specified "
"when the constant strategy is used."
)
self.constant_ = check_array(
self.constant,
accept_sparse=["csr", "csc", "coo"],
ensure_2d=False,
ensure_min_samples=0,
)
if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have shape (%d, 1)." % y.shape[1]
)
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
| DummyRegressor.fit |
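A sketch of `DummyRegressor.fit` with the "mean" and "quantile" strategies (illustrative data); the fitted constant is stored with shape `(1, n_outputs)`:
```python
import numpy as np
from sklearn.dummy import DummyRegressor

X = np.zeros((5, 1))            # features are ignored
y = np.array([1.0, 2.0, 3.0, 4.0, 100.0])

mean_reg = DummyRegressor(strategy="mean").fit(X, y)
q90_reg = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, y)
print(mean_reg.constant_)  # the mean of y
print(q90_reg.constant_)   # the 0.9 quantile of y
```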
scikit-learn | 37 | sklearn/dummy.py | def predict(self, X, return_std=False):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
All zeros in this case.
.. versionadded:: 0.20
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
Standard deviation of predictive distribution of query points.
"""
| /usr/src/app/target_test_cases/failed_tests_DummyRegressor.predict.txt | def predict(self, X, return_std=False):
"""Perform classification on test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
All zeros in this case.
.. versionadded:: 0.20
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
Predicted target values for X.
y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
Standard deviation of predictive distribution of query points.
"""
check_is_fitted(self)
n_samples = _num_samples(X)
y = np.full(
(n_samples, self.n_outputs_),
self.constant_,
dtype=np.array(self.constant_).dtype,
)
y_std = np.zeros((n_samples, self.n_outputs_))
if self.n_outputs_ == 1:
y = np.ravel(y)
y_std = np.ravel(y_std)
return (y, y_std) if return_std else y
| DummyRegressor.predict |
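A sketch of `predict` with `return_std=True` (illustrative data): the prediction is the fitted constant for every sample and the reported standard deviation is all zeros:
```python
import numpy as np
from sklearn.dummy import DummyRegressor

X = np.zeros((3, 1))
y = np.array([2.0, 4.0, 6.0])

reg = DummyRegressor(strategy="median").fit(X, y)
pred, std = reg.predict(np.zeros((2, 1)), return_std=True)
print(pred)  # the median of y repeated for every query sample
print(std)   # zeros: the baseline reports no predictive uncertainty
```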
scikit-learn | 38 | sklearn/linear_model/_coordinate_descent.py | def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : {ndarray, sparse matrix, sparse array} of (n_samples, n_features)
Data.
Note that large sparse matrices and arrays requiring `int64`
indices are not accepted.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights. Internally, the `sample_weight` vector will be
rescaled to sum to `n_samples`.
.. versionadded:: 0.23
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
| /usr/src/app/target_test_cases/failed_tests_ElasticNet.fit.txt | def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : {ndarray, sparse matrix, sparse array} of (n_samples, n_features)
Data.
Note that large sparse matrices and arrays requiring `int64`
indices are not accepted.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights. Internally, the `sample_weight` vector will be
rescaled to sum to `n_samples`.
.. versionadded:: 0.23
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn(
(
"With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator"
),
stacklevel=2,
)
# Remember if X is copied
X_copied = False
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X_copied = self.copy_X and self.fit_intercept
X, y = validate_data(
self,
X,
y,
accept_sparse="csc",
order="F",
dtype=[np.float64, np.float32],
force_writeable=True,
accept_large_sparse=False,
copy=X_copied,
multi_output=True,
y_numeric=True,
)
y = check_array(
y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False
)
n_samples, n_features = X.shape
alpha = self.alpha
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
if check_input:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# TLDR: Rescale sw to sum up to n_samples.
# Long: The objective function of Enet
#
# 1/2 * np.average(squared error, weights=sw)
# + alpha * penalty (1)
#
# is invariant under rescaling of sw.
# But enet_path coordinate descent minimizes
#
# 1/2 * sum(squared error) + alpha' * penalty (2)
#
# and therefore sets
#
# alpha' = n_samples * alpha (3)
#
# inside its function body, which results in objective (2) being
# equivalent to (1) in case of no sw.
# With sw, however, enet_path should set
#
# alpha' = sum(sw) * alpha (4)
#
# Therefore, we use the freedom of Eq. (1) to rescale sw before
# calling enet_path, i.e.
#
# sw *= n_samples / sum(sw)
#
# such that sum(sw) = n_samples. This way, (3) and (4) are the same.
sample_weight = sample_weight * (n_samples / np.sum(sample_weight))
# Note: Alternatively, we could also have rescaled alpha instead
# of sample_weight:
#
# alpha *= np.sum(sample_weight) / n_samples
# Ensure copying happens only once, don't do it again if done above.
# X and y will be rescaled if sample_weight is not None, order='F'
# ensures that the returned X and y are still F-contiguous.
should_copy = self.copy_X and not X_copied
X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(
X,
y,
None,
self.precompute,
fit_intercept=self.fit_intercept,
copy=should_copy,
check_input=check_input,
sample_weight=sample_weight,
)
# coordinate descent needs F-ordered arrays and _pre_fit might have
# called _rescale_data
if check_input or sample_weight is not None:
X, y = _set_order(X, y, order="F")
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_targets = y.shape[1]
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F")
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in range(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = self.path(
X,
y[:, k],
l1_ratio=self.l1_ratio,
eps=None,
n_alphas=None,
alphas=[alpha],
precompute=precompute,
Xy=this_Xy,
copy_X=True,
coef_init=coef_[k],
verbose=False,
return_n_iter=True,
positive=self.positive,
check_input=False,
# from here on **params
tol=self.tol,
X_offset=X_offset,
X_scale=X_scale,
max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
sample_weight=sample_weight,
)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_ = coef_[0]
self.dual_gap_ = dual_gaps_[0]
else:
self.coef_ = coef_
self.dual_gap_ = dual_gaps_
self._set_intercept(X_offset, y_offset, X_scale)
# check for finiteness of coefficients
if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]):
raise ValueError(
"Coordinate descent iterations resulted in non-finite parameter"
" values. The input data may contain large values and need to"
" be preprocessed."
)
# return self for chaining fit and predict calls
return self
| ElasticNet.fit |
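An illustrative sketch of `ElasticNet.fit` with `sample_weight`, echoing the rescaling argument in the body above: the objective is invariant to rescaling the weights, so multiplying all weights by a constant should leave the coefficients unchanged (synthetic data):
```python
import numpy as np
from sklearn.linear_model import ElasticNet

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, 0.0, -2.0]) + 0.1 * rng.randn(50)
w = rng.uniform(0.5, 2.0, size=50)

# Internally sample_weight is rescaled to sum to n_samples, so a global
# rescaling of the weights does not change the solution.
m1 = ElasticNet(alpha=0.1, l1_ratio=0.5).fit(X, y, sample_weight=w)
m2 = ElasticNet(alpha=0.1, l1_ratio=0.5).fit(X, y, sample_weight=10 * w)
print(np.allclose(m1.coef_, m2.coef_))  # True
```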
scikit-learn | 39 | sklearn/covariance/_empirical_covariance.py | def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
"""Compute the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
| /usr/src/app/target_test_cases/failed_tests_EmpiricalCovariance.error_norm.txt | def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
"""Compute the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error**2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented"
)
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
| EmpiricalCovariance.error_norm |
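A sketch of `error_norm` comparing a fitted covariance against a reference matrix (synthetic data):
```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
true_cov = np.array([[1.0, 0.3], [0.3, 1.0]])
X = rng.multivariate_normal(mean=[0, 0], cov=true_cov, size=500)

est = EmpiricalCovariance().fit(X)
# Scaled, squared Frobenius error (the defaults) and the spectral norm.
print(est.error_norm(true_cov))
print(est.error_norm(true_cov, norm="spectral", squared=False))
```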
scikit-learn | 40 | sklearn/covariance/_empirical_covariance.py | def fit(self, X, y=None):
"""Fit the maximum likelihood covariance estimator to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_EmpiricalCovariance.fit.txt | def fit(self, X, y=None):
"""Fit the maximum likelihood covariance estimator to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
| EmpiricalCovariance.fit |
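A minimal sketch of `EmpiricalCovariance.fit` on synthetic Gaussian data, showing the fitted location and covariance:
```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 5.0],
                            cov=[[2.0, 0.5], [0.5, 1.0]], size=1000)

est = EmpiricalCovariance().fit(X)
print(est.location_)    # close to [0, 5]
print(est.covariance_)  # close to [[2, 0.5], [0.5, 1]]
```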
scikit-learn | 41 | sklearn/covariance/_empirical_covariance.py | def mahalanobis(self, X):
"""Compute the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
| /usr/src/app/target_test_cases/failed_tests_EmpiricalCovariance.mahalanobis.txt | def mahalanobis(self, X):
"""Compute the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
dist : ndarray of shape (n_samples,)
Squared Mahalanobis distances of the observations.
"""
X = validate_data(self, X, reset=False)
precision = self.get_precision()
with config_context(assume_finite=True):
# compute mahalanobis distances
dist = pairwise_distances(
X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
)
return np.reshape(dist, (len(X),)) ** 2
| EmpiricalCovariance.mahalanobis |
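A sketch of `mahalanobis` on a fitted estimator (synthetic data): squared distances of query points to the fitted location under the estimated precision matrix:
```python
import numpy as np
from sklearn.covariance import EmpiricalCovariance

rng = np.random.RandomState(0)
X = rng.randn(500, 2)

est = EmpiricalCovariance().fit(X)
queries = np.array([[0.0, 0.0], [3.0, 3.0]])
print(est.mahalanobis(queries))  # the far point gets a much larger value
```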
scikit-learn | 42 | sklearn/decomposition/_factor_analysis.py | def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using SVD based approach.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Ignored parameter.
Returns
-------
self : object
FactorAnalysis class instance.
"""
| /usr/src/app/target_test_cases/failed_tests_FactorAnalysis.fit.txt | def fit(self, X, y=None):
"""Fit the FactorAnalysis model to X using SVD based approach.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : Ignored
Ignored parameter.
Returns
-------
self : object
FactorAnalysis class instance.
"""
X = validate_data(
self, X, copy=self.copy, dtype=np.float64, force_writeable=True
)
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
# some constant terms
nsqrt = sqrt(n_samples)
llconst = n_features * log(2.0 * np.pi) + n_components
var = np.var(X, axis=0)
if self.noise_variance_init is None:
psi = np.ones(n_features, dtype=X.dtype)
else:
if len(self.noise_variance_init) != n_features:
raise ValueError(
"noise_variance_init dimension does not "
"with number of features : %d != %d"
% (len(self.noise_variance_init), n_features)
)
psi = np.array(self.noise_variance_init)
loglike = []
old_ll = -np.inf
SMALL = 1e-12
# we'll modify svd outputs to return unexplained variance
# to allow for unified computation of loglikelihood
if self.svd_method == "lapack":
def my_svd(X):
_, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
return (
s[:n_components],
Vt[:n_components],
squared_norm(s[n_components:]),
)
else: # svd_method == "randomized"
random_state = check_random_state(self.random_state)
def my_svd(X):
_, s, Vt = randomized_svd(
X,
n_components,
random_state=random_state,
n_iter=self.iterated_power,
)
return s, Vt, squared_norm(X) - squared_norm(s)
for i in range(self.max_iter):
# SMALL helps numerics
sqrt_psi = np.sqrt(psi) + SMALL
s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
s **= 2
# Use 'maximum' here to avoid sqrt problems.
W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt
del Vt
W *= sqrt_psi
# loglikelihood
ll = llconst + np.sum(np.log(s))
ll += unexp_var + np.sum(np.log(psi))
ll *= -n_samples / 2.0
loglike.append(ll)
if (ll - old_ll) < self.tol:
break
old_ll = ll
psi = np.maximum(var - np.sum(W**2, axis=0), SMALL)
else:
warnings.warn(
"FactorAnalysis did not converge."
+ " You might want"
+ " to increase the number of iterations.",
ConvergenceWarning,
)
self.components_ = W
if self.rotation is not None:
self.components_ = self._rotate(W)
self.noise_variance_ = psi
self.loglike_ = loglike
self.n_iter_ = i + 1
return self
| FactorAnalysis.fit |
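A minimal sketch of `FactorAnalysis.fit` showing the attributes set at the end of the EM loop (illustrative parameters):
```python
from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X, _ = load_iris(return_X_y=True)

fa = FactorAnalysis(n_components=2, random_state=0).fit(X)
print(fa.components_.shape)      # (2, 4): factor loading matrix W
print(fa.noise_variance_.shape)  # (4,): per-feature noise variance psi
print(fa.loglike_[-1])           # log-likelihood at the last iteration
```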
scikit-learn | 43 | sklearn/decomposition/_factor_analysis.py | def score_samples(self, X):
"""Compute the log-likelihood of each sample.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
| /usr/src/app/target_test_cases/failed_tests_FactorAnalysis.score_samples.txt | def score_samples(self, X):
"""Compute the log-likelihood of each sample.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
Xr = X - self.mean_
precision = self.get_precision()
n_features = X.shape[1]
log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
return log_like
| FactorAnalysis.score_samples |
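A short sketch of `score_samples` (illustrative parameters): per-sample log-likelihood under the fitted latent-variable model; `score(X)` is the mean of these values:
```python
from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X, _ = load_iris(return_X_y=True)
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)

ll = fa.score_samples(X)
print(ll.shape, ll.mean())
```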
scikit-learn | 44 | sklearn/decomposition/_factor_analysis.py | def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
The latent variables of X.
"""
| /usr/src/app/target_test_cases/failed_tests_FactorAnalysis.transform.txt | def transform(self, X):
"""Apply dimensionality reduction to X using the model.
Compute the expected mean of the latent variables.
See Barber, 21.2.33 (or Bishop, 12.66).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
The latent variables of X.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
Ih = np.eye(len(self.components_))
X_transformed = X - self.mean_
Wpsi = self.components_ / self.noise_variance_
cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
tmp = np.dot(X_transformed, Wpsi.T)
X_transformed = np.dot(tmp, cov_z)
return X_transformed
| FactorAnalysis.transform |
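A short sketch of `transform` (illustrative parameters): the posterior mean of the latent factors for each sample:
```python
from sklearn.datasets import load_iris
from sklearn.decomposition import FactorAnalysis

X, _ = load_iris(return_X_y=True)
fa = FactorAnalysis(n_components=2, random_state=0).fit(X)

Z = fa.transform(X)
print(Z.shape)  # (150, 2)
```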
scikit-learn | 45 | sklearn/decomposition/_fastica.py | def transform(self, X, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where `n_samples` is the number of samples
and `n_features` is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Estimated sources obtained by transforming the data with the
estimated unmixing matrix.
"""
| /usr/src/app/target_test_cases/failed_tests_FastICA.transform.txt | def transform(self, X, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where `n_samples` is the number of samples
and `n_features` is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Estimated sources obtained by transforming the data with the
estimated unmixing matrix.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
copy=(copy and self.whiten),
dtype=[np.float64, np.float32],
reset=False,
)
if self.whiten:
X -= self.mean_
return np.dot(X, self.components_.T)
| FastICA.transform |
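A sketch of `FastICA.transform` on synthetic mixed signals (illustrative data; `whiten="unit-variance"` assumes scikit-learn >= 1.1):
```python
import numpy as np
from sklearn.decomposition import FastICA

rng = np.random.RandomState(0)
t = np.linspace(0, 8, 2000)
S = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]  # two independent sources
A = np.array([[1.0, 0.5], [0.5, 2.0]])            # mixing matrix
X = S @ A.T                                       # observed mixtures

ica = FastICA(n_components=2, whiten="unit-variance", random_state=0).fit(X)
S_est = ica.transform(X)  # recovered sources (up to sign, scale, order)
print(S_est.shape)        # (2000, 2)
```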
scikit-learn | 46 | sklearn/feature_extraction/_hash.py | def transform(self, raw_X):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Feature matrix, for use with estimators or further transformers.
"""
| /usr/src/app/target_test_cases/failed_tests_FeatureHasher.transform.txt | def transform(self, raw_X):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
Returns
-------
X : sparse matrix of shape (n_samples, n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
first_raw_X = next(raw_X)
if isinstance(first_raw_X, str):
raise ValueError(
"Samples can not be a single string. The input must be an iterable"
" over iterables of strings."
)
raw_X_ = chain([first_raw_X], raw_X)
raw_X = (((f, 1) for f in x) for x in raw_X_)
indices, indptr, values = _hashing_transform(
raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix(
(values, indices, indptr),
dtype=self.dtype,
shape=(n_samples, self.n_features),
)
X.sum_duplicates() # also sorts the indices
return X
| FeatureHasher.transform |
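The hasher is stateless, so transform can be called directly on raw samples; a small illustrative sketch (not part of the dataset row) follows.
from sklearn.feature_extraction import FeatureHasher

hasher = FeatureHasher(n_features=16, input_type="dict")
raw_X = [{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}]
X = hasher.transform(raw_X)      # scipy.sparse CSR matrix of shape (2, 16)
print(X.shape, X.nnz)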
scikit-learn | 47 | sklearn/pipeline.py | def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `fit` methods of the
sub-transformers.
- If `enable_metadata_routing=True`:
Parameters safely routed to the `fit` methods of the
sub-transformers. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionchanged:: 1.5
`**fit_params` can be routed via metadata routing API.
Returns
-------
self : object
FeatureUnion class instance.
"""
| /usr/src/app/target_test_cases/failed_tests_FeatureUnion.fit.txt | def fit(self, X, y=None, **fit_params):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**fit_params : dict, default=None
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `fit` methods of the
sub-transformers.
- If `enable_metadata_routing=True`:
Parameters safely routed to the `fit` methods of the
sub-transformers. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionchanged:: 1.5
`**fit_params` can be routed via metadata routing API.
Returns
-------
self : object
FeatureUnion class instance.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit", **fit_params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
for name, _ in self.transformer_list:
routed_params[name] = Bunch(fit={})
routed_params[name].fit = fit_params
transformers = self._parallel_func(X, y, _fit_one, routed_params)
if not transformers:
# All transformers are None
return self
self._update_transformer_list(transformers)
return self
| FeatureUnion.fit |
scikit-learn | 48 | sklearn/pipeline.py | def fit_transform(self, X, y=None, **params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**params : dict, default=None
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `fit` methods of the
sub-transformers.
- If `enable_metadata_routing=True`:
Parameters safely routed to the `fit` methods of the
sub-transformers. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionchanged:: 1.5
`**params` can now be routed via metadata routing API.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
| /usr/src/app/target_test_cases/failed_tests_FeatureUnion.fit_transform.txt | def fit_transform(self, X, y=None, **params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like of shape (n_samples, n_outputs), default=None
Targets for supervised learning.
**params : dict, default=None
- If `enable_metadata_routing=False` (default):
Parameters directly passed to the `fit` methods of the
sub-transformers.
- If `enable_metadata_routing=True`:
Parameters safely routed to the `fit` methods of the
sub-transformers. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionchanged:: 1.5
`**params` can now be routed via metadata routing API.
Returns
-------
X_t : array-like or sparse matrix of \
shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit_transform", **params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
for name, obj in self.transformer_list:
if hasattr(obj, "fit_transform"):
routed_params[name] = Bunch(fit_transform={})
routed_params[name].fit_transform = params
else:
routed_params[name] = Bunch(fit={})
routed_params[name] = Bunch(transform={})
routed_params[name].fit = params
results = self._parallel_func(X, y, _fit_transform_one, routed_params)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
return self._hstack(Xs)
| FeatureUnion.fit_transform |
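An illustrative sketch of fit_transform concatenating two transformers' outputs column-wise (editorial addition; the choice of PCA and SelectKBest is arbitrary).
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.pipeline import FeatureUnion

X, y = load_iris(return_X_y=True)
union = FeatureUnion([
    ("pca", PCA(n_components=2)),
    ("kbest", SelectKBest(f_classif, k=1)),
])
X_t = union.fit_transform(X, y)   # hstack of both outputs -> shape (150, 3)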
scikit-learn | 49 | sklearn/pipeline.py | def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
| /usr/src/app/target_test_cases/failed_tests_FeatureUnion.get_feature_names_out.txt | def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
# List of tuples (name, feature_names_out)
transformer_with_feature_names_out = []
for name, trans, _ in self._iter():
if not hasattr(trans, "get_feature_names_out"):
raise AttributeError(
"Transformer %s (type %s) does not provide get_feature_names_out."
% (str(name), type(trans).__name__)
)
feature_names_out = trans.get_feature_names_out(input_features)
transformer_with_feature_names_out.append((name, feature_names_out))
return self._add_prefix_for_feature_names_out(
transformer_with_feature_names_out
)
| FeatureUnion.get_feature_names_out |
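A small sketch showing the prefixed output names produced by the method above (editorial addition; the exact names depend on the sub-transformers).
from sklearn.datasets import load_iris
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import MinMaxScaler, StandardScaler

X, _ = load_iris(return_X_y=True)
union = FeatureUnion([("std", StandardScaler()), ("minmax", MinMaxScaler())])
union.fit(X)
print(union.get_feature_names_out())
# each name is prefixed with its transformer's name, e.g. 'std__x0', ..., 'minmax__x3'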
scikit-learn | 50 | sklearn/pipeline.py | def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
| /usr/src/app/target_test_cases/failed_tests_FeatureUnion.get_metadata_routing.txt | def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self.__class__.__name__)
for name, transformer in self.transformer_list:
router.add(
**{name: transformer},
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="fit_transform", callee="fit_transform")
.add(caller="fit_transform", callee="fit")
.add(caller="fit_transform", callee="transform")
.add(caller="transform", callee="transform"),
)
return router
| FeatureUnion.get_metadata_routing |
scikit-learn | 51 | sklearn/pipeline.py | def transform(self, X, **params):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
**params : dict, default=None
Parameters routed to the `transform` method of the sub-transformers via the
metadata routing API. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionadded:: 1.5
Returns
-------
X_t : array-like or sparse matrix of shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
| /usr/src/app/target_test_cases/failed_tests_FeatureUnion.transform.txt | def transform(self, X, **params):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
**params : dict, default=None
Parameters routed to the `transform` method of the sub-transformers via the
metadata routing API. See :ref:`Metadata Routing User Guide
<metadata_routing>` for more details.
.. versionadded:: 1.5
Returns
-------
X_t : array-like or sparse matrix of shape (n_samples, sum_n_components)
The `hstack` of results of transformers. `sum_n_components` is the
sum of `n_components` (output dimension) over transformers.
"""
_raise_for_params(params, self, "transform")
if _routing_enabled():
routed_params = process_routing(self, "transform", **params)
else:
# TODO(SLEP6): remove when metadata routing cannot be disabled.
routed_params = Bunch()
for name, _ in self.transformer_list:
routed_params[name] = Bunch(transform={})
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight, params=routed_params[name])
for name, trans, weight in self._iter()
)
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return self._hstack(Xs)
| FeatureUnion.transform |
scikit-learn | 52 | sklearn/model_selection/_classification_threshold.py | def predict(self, X):
"""Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
"""
| /usr/src/app/target_test_cases/failed_tests_FixedThresholdClassifier.predict.txt | def predict(self, X):
"""Predict the target of new samples.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples, as accepted by `estimator.predict`.
Returns
-------
class_labels : ndarray of shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, "estimator_")
y_score, _, response_method_used = _get_response_values_binary(
self.estimator_,
X,
self._get_response_method(),
pos_label=self.pos_label,
return_response_method_used=True,
)
if self.threshold == "auto":
decision_threshold = 0.5 if response_method_used == "predict_proba" else 0.0
else:
decision_threshold = self.threshold
return _threshold_scores_to_class_labels(
y_score, decision_threshold, self.classes_, self.pos_label
)
| FixedThresholdClassifier.predict |
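A hedged usage sketch (editorial addition): thresholding predict_proba at a fixed value of 0.25; the threshold and dataset are illustrative only.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import FixedThresholdClassifier

X, y = make_classification(n_samples=200, weights=[0.9, 0.1], random_state=0)
clf = FixedThresholdClassifier(
    estimator=LogisticRegression(),
    threshold=0.25,
    response_method="predict_proba",
).fit(X, y)
y_pred = clf.predict(X)   # labels obtained by thresholding predict_proba at 0.25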
scikit-learn | 53 | sklearn/preprocessing/_function_transformer.py | def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
if `validate=True` else any object that `func` can handle
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
| /usr/src/app/target_test_cases/failed_tests_FunctionTransformer.transform.txt | def transform(self, X):
"""Transform X using the forward function.
Parameters
----------
X : {array-like, sparse-matrix} of shape (n_samples, n_features) \
if `validate=True` else any object that `func` can handle
Input array.
Returns
-------
X_out : array-like, shape (n_samples, n_features)
Transformed input.
"""
X = self._check_input(X, reset=False)
out = self._transform(X, func=self.func, kw_args=self.kw_args)
output_config = _get_output_config("transform", self)["dense"]
if hasattr(out, "columns") and self.feature_names_out is not None:
# check the consistency between the column names provided by `transform`
# and the column names provided by `get_feature_names_out`.
feature_names_out = self.get_feature_names_out()
if list(out.columns) != list(feature_names_out):
# we can override the column names of the output if it is inconsistent
# with the column names provided by `get_feature_names_out` in the
# following cases:
# * `func` preserved the column names between the input and the output
# * the input column names are all numbers
# * the output is requested to be a DataFrame (pandas or polars)
feature_names_in = getattr(
X, "feature_names_in_", _get_feature_names(X)
)
same_feature_names_in_out = feature_names_in is not None and list(
feature_names_in
) == list(out.columns)
not_all_str_columns = not all(
isinstance(col, str) for col in out.columns
)
if same_feature_names_in_out or not_all_str_columns:
adapter = _get_adapter_from_container(out)
out = adapter.create_container(
X_output=out,
X_original=out,
columns=feature_names_out,
inplace=False,
)
else:
raise ValueError(
"The output generated by `func` have different column names "
"than the ones provided by `get_feature_names_out`. "
f"Got output with columns names: {list(out.columns)} and "
"`get_feature_names_out` returned: "
f"{list(self.get_feature_names_out())}. "
"The column names can be overridden by setting "
"`set_output(transform='pandas')` or "
"`set_output(transform='polars')` such that the column names "
"are set to the names provided by `get_feature_names_out`."
)
if self.feature_names_out is None:
warn_msg = (
"When `set_output` is configured to be '{0}', `func` should return "
"a {0} DataFrame to follow the `set_output` API or `feature_names_out`"
" should be defined."
)
if output_config == "pandas" and not _is_pandas_df(out):
warnings.warn(warn_msg.format("pandas"))
elif output_config == "polars" and not _is_polars_df(out):
warnings.warn(warn_msg.format("polars"))
return out
| FunctionTransformer.transform |
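A minimal sketch applying an elementwise log1p through FunctionTransformer (editorial addition; feature_names_out="one-to-one" is one possible choice, not a requirement).
import numpy as np
from sklearn.preprocessing import FunctionTransformer

log_tf = FunctionTransformer(np.log1p, feature_names_out="one-to-one")
X = np.array([[0.0, 1.0], [2.0, 3.0]])
X_out = log_tf.fit_transform(X)   # elementwise log(1 + x); the transformer is stateless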
scikit-learn | 54 | sklearn/gaussian_process/_gpc.py | def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : object
Returns an instance of self.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessClassifier.fit.txt | def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : object
Returns an instance of self.
"""
if isinstance(self.kernel, CompoundKernel):
raise ValueError("kernel cannot be a CompoundKernel")
if self.kernel is None or self.kernel.requires_vector_input:
X, y = validate_data(
self, X, y, multi_output=False, ensure_2d=True, dtype="numeric"
)
else:
X, y = validate_data(
self, X, y, multi_output=False, ensure_2d=False, dtype=None
)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
kernel=self.kernel,
optimizer=self.optimizer,
n_restarts_optimizer=self.n_restarts_optimizer,
max_iter_predict=self.max_iter_predict,
warm_start=self.warm_start,
copy_X_train=self.copy_X_train,
random_state=self.random_state,
)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError(
"GaussianProcessClassifier requires 2 or more "
"distinct classes; got %d class (only class %s "
"is present)" % (self.n_classes_, self.classes_[0])
)
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = OneVsRestClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = OneVsOneClassifier(
self.base_estimator_, n_jobs=self.n_jobs
)
else:
raise ValueError("Unknown multi-class mode %s" % self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[
estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_
]
)
else:
self.log_marginal_likelihood_value_ = (
self.base_estimator_.log_marginal_likelihood()
)
return self
| GaussianProcessClassifier.fit |
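A short sketch of fitting the classifier on a three-class problem, which internally falls back to one-vs-rest (editorial addition; the kernel choice is illustrative).
from sklearn.datasets import load_iris
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X, y = load_iris(return_X_y=True)
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(1.0), random_state=0)
gpc.fit(X, y)                            # multi-class -> one_vs_rest by default
print(gpc.predict_proba(X[:2]).shape)    # (2, 3)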
scikit-learn | 55 | sklearn/gaussian_process/_gpc.py | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessClassifier.log_marginal_likelihood.txt | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
check_is_fitted(self)
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient, clone_kernel=clone_kernel
)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC."
)
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[
estimator.log_marginal_likelihood(
theta, clone_kernel=clone_kernel
)
for i, estimator in enumerate(estimators)
]
)
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[
estimator.log_marginal_likelihood(
theta[n_dims * i : n_dims * (i + 1)],
clone_kernel=clone_kernel,
)
for i, estimator in enumerate(estimators)
]
)
else:
raise ValueError(
"Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0], theta.shape[0])
)
| GaussianProcessClassifier.log_marginal_likelihood |
scikit-learn | 56 | sklearn/gaussian_process/_gpc.py | def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessClassifier.predict_proba.txt | def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError(
"one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead."
)
if self.kernel is None or self.kernel.requires_vector_input:
X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False)
else:
X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False)
return self.base_estimator_.predict_proba(X)
| GaussianProcessClassifier.predict_proba |
scikit-learn | 57 | sklearn/gaussian_process/_gpr.py | def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.fit.txt | def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
if self.kernel_.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X, y = validate_data(
self,
X,
y,
multi_output=True,
y_numeric=True,
ensure_2d=ensure_2d,
dtype=dtype,
)
n_targets_seen = y.shape[1] if y.ndim > 1 else 1
if self.n_targets is not None and n_targets_seen != self.n_targets:
raise ValueError(
"The number of targets seen in `y` is different from the parameter "
f"`n_targets`. Got {n_targets_seen} != {self.n_targets}."
)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
# Remove mean and make unit variance
y = (y - self._y_train_mean) / self._y_train_std
else:
shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1
self._y_train_mean = np.zeros(shape=shape_y_stats)
self._y_train_std = np.ones(shape=shape_y_stats)
if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError(
"alpha must be a scalar or an array with same number of "
f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
)
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
(
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta, clone_kernel=False
)
# Precompute quantities required for predictions which are independent
# of actual query points
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError as exc:
exc.args = (
(
f"The kernel, {self.kernel_}, is not returning a positive "
"definite matrix. Try gradually increasing the 'alpha' "
"parameter of your GaussianProcessRegressor estimator."
),
) + exc.args
raise
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
self.alpha_ = cho_solve(
(self.L_, GPR_CHOLESKY_LOWER),
self.y_train_,
check_finite=False,
)
return self
| GaussianProcessRegressor.fit |
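An illustrative sketch of fitting the regressor with a composite kernel and inspecting the optimized hyperparameters (editorial addition; the kernel and noise level are arbitrary).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(40, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.1, size=40)

kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0).fit(X, y)
print(gpr.kernel_)                          # kernel with optimized hyperparameters
print(gpr.log_marginal_likelihood_value_)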
scikit-learn | 58 | sklearn/gaussian_process/_gpr.py | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.log_marginal_likelihood.txt | def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)
# Alg 2.1, page 19, line 7
# -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
# y is originally thought to be a (1, n_samples) row vector. However,
# in the multi-output case, y is of shape (n_samples, n_outputs) and we need to compute
# y^T . alpha for each output, independently using einsum. Thus, it
# is equivalent to:
# for output_idx in range(n_outputs):
# log_likelihood_dims[output_idx] = (
# y_train[:, [output_idx]] @ alpha[:, [output_idx]]
# )
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
# the log likelihood is summed up across the outputs
log_likelihood = log_likelihood_dims.sum(axis=-1)
if eval_gradient:
# Eq. 5.9, p. 114, and footnote 5 in p. 114
# 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
# alpha is supposed to be a vector of (n_samples,) elements. With
# multioutputs, alpha is a matrix of size (n_samples, n_outputs).
# Therefore, we want to construct a matrix of
# (n_samples, n_samples, n_outputs) equivalent to
# for output_idx in range(n_outputs):
# output_alpha = alpha[:, [output_idx]]
# inner_term[..., output_idx] = output_alpha @ output_alpha.T
inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
# compute K^-1 of shape (n_samples, n_samples)
K_inv = cho_solve(
(L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
)
# create a new axis to use broadcasting between inner_term and
# K_inv
inner_term -= K_inv[..., np.newaxis]
# Since we are interested about the trace of
# inner_term @ K_gradient, we don't explicitly compute the
# matrix-by-matrix operation and instead use an einsum. Therefore
# it is equivalent to:
# for param_idx in range(n_kernel_params):
# for output_idx in range(n_output):
# log_likelihood_gradient_dims[param_idx, output_idx] = (
# inner_term[..., output_idx] @
# K_gradient[..., param_idx]
# )
log_likelihood_gradient_dims = 0.5 * np.einsum(
"ijl,jik->kl", inner_term, K_gradient
)
# the log likelihood gradient is summed up across the outputs
log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
| GaussianProcessRegressor.log_marginal_likelihood |
scikit-learn | 59 | sklearn/gaussian_process/_gpr.py | def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or \
(n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.predict.txt | def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or \
(n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
if return_std and return_cov:
raise RuntimeError(
"At most one of return_std or return_cov can be requested."
)
if self.kernel is None or self.kernel.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X = validate_data(self, X, ensure_2d=ensure_2d, dtype=dtype, reset=False)
if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior
if self.kernel is None:
kernel = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
kernel = self.kernel
n_targets = self.n_targets if self.n_targets is not None else 1
y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze()
if return_cov:
y_cov = kernel(X)
if n_targets > 1:
y_cov = np.repeat(
np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1
)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
if n_targets > 1:
y_var = np.repeat(
np.expand_dims(y_var, -1), repeats=n_targets, axis=-1
)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
# Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans @ self.alpha_
# undo normalisation
y_mean = self._y_train_std * y_mean + self._y_train_mean
# if y_mean has shape (n_samples, 1), reshape to (n_samples,)
if y_mean.ndim > 1 and y_mean.shape[1] == 1:
y_mean = np.squeeze(y_mean, axis=1)
# Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T
V = solve_triangular(
self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False
)
if return_cov:
# Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v
y_cov = self.kernel_(X) - V.T @ V
# undo normalisation
y_cov = np.outer(y_cov, self._y_train_std**2).reshape(*y_cov.shape, -1)
# if y_cov has shape (n_samples, n_samples, 1), reshape to
# (n_samples, n_samples)
if y_cov.shape[2] == 1:
y_cov = np.squeeze(y_cov, axis=2)
return y_mean, y_cov
elif return_std:
# Compute variance of predictive distribution
# Use einsum to avoid explicitly forming the large matrix
# V^T @ V just to extract its diagonal afterward.
y_var = self.kernel_.diag(X).copy()
y_var -= np.einsum("ij,ji->i", V.T, V)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn(
"Predicted variances smaller than 0. "
"Setting those variances to 0."
)
y_var[y_var_negative] = 0.0
# undo normalisation
y_var = np.outer(y_var, self._y_train_std**2).reshape(*y_var.shape, -1)
# if y_var has shape (n_samples, 1), reshape to (n_samples,)
if y_var.shape[1] == 1:
y_var = np.squeeze(y_var, axis=1)
return y_mean, np.sqrt(y_var)
else:
return y_mean
| GaussianProcessRegressor.predict |
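A brief sketch of requesting point-wise uncertainty from predict (editorial addition; only one of return_std/return_cov may be requested per call, as noted above).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(40, 1))
y = np.sin(X).ravel()

gpr = GaussianProcessRegressor(random_state=0).fit(X, y)
X_new = np.linspace(0, 5, 100).reshape(-1, 1)
y_mean, y_std = gpr.predict(X_new, return_std=True)    # point-wise uncertainty
# y_mean, y_cov = gpr.predict(X_new, return_cov=True)  # or the full joint covariance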
scikit-learn | 60 | sklearn/gaussian_process/_gpr.py | def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
Number of samples drawn from the Gaussian process per query point.
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, n_samples), or \
(n_samples_X, n_targets, n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
| /usr/src/app/target_test_cases/failed_tests_GaussianProcessRegressor.sample_y.txt | def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
Number of samples drawn from the Gaussian process per query point.
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, n_samples), or \
(n_samples_X, n_targets, n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = [
rng.multivariate_normal(
y_mean[:, target], y_cov[..., target], n_samples
).T[:, np.newaxis]
for target in range(y_mean.shape[1])
]
y_samples = np.hstack(y_samples)
return y_samples
| GaussianProcessRegressor.sample_y |
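A small sketch drawing posterior samples at query points (editorial addition; the number of samples is arbitrary).
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(20, 1))
y = np.sin(X).ravel()

gpr = GaussianProcessRegressor(random_state=0).fit(X, y)
X_query = np.linspace(0, 5, 50).reshape(-1, 1)
samples = gpr.sample_y(X_query, n_samples=3, random_state=0)
print(samples.shape)   # (50, 3): three posterior draws at the query points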
scikit-learn | 61 | sklearn/ensemble/_gb.py | def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape (n_samples,).
"""
| /usr/src/app/target_test_cases/failed_tests_GradientBoostingClassifier.decision_function.txt | def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
score : ndarray of shape (n_samples, n_classes) or (n_samples,)
The decision function of the input samples, which corresponds to
the raw values predicted from the trees of the ensemble. The
order of the classes corresponds to that in the attribute
:term:`classes_`. Regression and binary classification produce an
array of shape (n_samples,).
"""
X = validate_data(
self, X, dtype=DTYPE, order="C", accept_sparse="csr", reset=False
)
raw_predictions = self._raw_predict(X)
if raw_predictions.shape[1] == 1:
return raw_predictions.ravel()
return raw_predictions
| GradientBoostingClassifier.decision_function |
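A minimal sketch showing the shape of the decision function for a binary problem (editorial addition; hyperparameters are illustrative).
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=200, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=50, random_state=0).fit(X, y)
scores = gbc.decision_function(X[:5])   # shape (5,) for binary classification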
scikit-learn | 62 | sklearn/ensemble/_gb.py | def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
| /usr/src/app/target_test_cases/failed_tests_GradientBoostingClassifier.staged_predict.txt | def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
if self.n_classes_ == 2: # n_trees_per_iteration_ = 1
for raw_predictions in self._staged_raw_predict(X):
encoded_classes = (raw_predictions.squeeze() >= 0).astype(int)
yield self.classes_.take(encoded_classes, axis=0)
else:
for raw_predictions in self._staged_raw_predict(X):
encoded_classes = np.argmax(raw_predictions, axis=1)
yield self.classes_.take(encoded_classes, axis=0)
| GradientBoostingClassifier.staged_predict |
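A sketch of the monitoring use case mentioned in the docstring: tracking test accuracy after each boosting stage (editorial addition; the dataset and split are illustrative).
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=400, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=100, random_state=0).fit(X_tr, y_tr)
# test accuracy after each boosting stage, useful for choosing n_estimators
staged_acc = [accuracy_score(y_te, y_pred) for y_pred in gbc.staged_predict(X_te)]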
scikit-learn | 63 | sklearn/ensemble/_gb.py | def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
| /usr/src/app/target_test_cases/failed_tests_GradientBoostingClassifier.staged_predict_proba.txt | def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determining the error on a testing set)
after each stage.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted value of the input samples.
"""
try:
for raw_predictions in self._staged_raw_predict(X):
yield self._loss.predict_proba(raw_predictions)
except NotFittedError:
raise
except AttributeError as e:
raise AttributeError(
"loss=%r does not support predict_proba" % self.loss
) from e
| GradientBoostingClassifier.staged_predict_proba |
scikit-learn | 64 | sklearn/linear_model/_huber.py | def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,)
Weight given to each sample.
Returns
-------
self : object
Fitted `HuberRegressor` estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_HuberRegressor.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,)
Weight given to each sample.
Returns
-------
self : object
Fitted `HuberRegressor` estimator.
"""
X, y = validate_data(
self,
X,
y,
copy=False,
accept_sparse=["csr"],
y_numeric=True,
dtype=[np.float64, np.float32],
)
sample_weight = _check_sample_weight(sample_weight, X)
if self.warm_start and hasattr(self, "coef_"):
parameters = np.concatenate((self.coef_, [self.intercept_, self.scale_]))
else:
if self.fit_intercept:
parameters = np.zeros(X.shape[1] + 2)
else:
parameters = np.zeros(X.shape[1] + 1)
# Make sure to initialize the scale parameter to a strictly
# positive value:
parameters[-1] = 1
# Sigma or the scale factor should be non-negative.
# Setting it to zero might cause undefined bounds, hence we set it
# to a value close to zero.
bounds = np.tile([-np.inf, np.inf], (parameters.shape[0], 1))
bounds[-1][0] = np.finfo(np.float64).eps * 10
opt_res = optimize.minimize(
_huber_loss_and_gradient,
parameters,
method="L-BFGS-B",
jac=True,
args=(X, y, self.epsilon, self.alpha, sample_weight),
options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
bounds=bounds,
)
parameters = opt_res.x
if opt_res.status == 2:
raise ValueError(
"HuberRegressor convergence failed: l-BFGS-b solver terminated with %s"
% opt_res.message
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
self.scale_ = parameters[-1]
if self.fit_intercept:
self.intercept_ = parameters[-2]
else:
self.intercept_ = 0.0
self.coef_ = parameters[: X.shape[1]]
residual = np.abs(y - safe_sparse_dot(X, self.coef_) - self.intercept_)
self.outliers_ = residual > self.scale_ * self.epsilon
return self
| HuberRegressor.fit |
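An illustrative sketch of fitting on data with injected outliers and reading the outliers_ mask that fit sets at the end (editorial addition).
import numpy as np
from sklearn.linear_model import HuberRegressor

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(100, 1))
y = 2.0 * X.ravel() + rng.normal(scale=0.2, size=100)
y[:5] += 20.0                                  # inject a few outliers

huber = HuberRegressor(epsilon=1.35, alpha=1e-4).fit(X, y)
print(huber.coef_, huber.intercept_)
print(huber.outliers_.sum())                   # samples flagged as outliers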
scikit-learn | 65 | sklearn/decomposition/_incremental_pca.py | def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_IncrementalPCA.fit.txt | def fit(self, X, y=None):
"""Fit the model with X, using minibatches of size batch_size.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self.components_ = None
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
self.singular_values_ = None
self.explained_variance_ = None
self.explained_variance_ratio_ = None
self.noise_variance_ = None
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "lil"],
copy=self.copy,
dtype=[np.float64, np.float32],
force_writeable=True,
)
n_samples, n_features = X.shape
if self.batch_size is None:
self.batch_size_ = 5 * n_features
else:
self.batch_size_ = self.batch_size
for batch in gen_batches(
n_samples, self.batch_size_, min_batch_size=self.n_components or 0
):
X_batch = X[batch]
if sparse.issparse(X_batch):
X_batch = X_batch.toarray()
self.partial_fit(X_batch, check_input=False)
return self
| IncrementalPCA.fit |
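A brief sketch of the minibatch fit shown above (editorial addition; batch_size is illustrative).
import numpy as np
from sklearn.decomposition import IncrementalPCA

X = np.random.RandomState(0).rand(1000, 20)
ipca = IncrementalPCA(n_components=5, batch_size=200)
ipca.fit(X)                       # internally iterates over minibatches of X
X_reduced = ipca.transform(X)     # shape (1000, 5)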
scikit-learn | 66 | sklearn/decomposition/_incremental_pca.py | def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
check_input : bool, default=True
Run check_array on X.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_IncrementalPCA.partial_fit.txt | def partial_fit(self, X, y=None, check_input=True):
"""Incremental fit with X. All of X is processed as a single batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
check_input : bool, default=True
Run check_array on X.
Returns
-------
self : object
Returns the instance itself.
"""
first_pass = not hasattr(self, "components_")
if check_input:
if sparse.issparse(X):
raise TypeError(
"IncrementalPCA.partial_fit does not support "
"sparse input. Either convert data to dense "
"or use IncrementalPCA.fit to do so in batches."
)
X = validate_data(
self,
X,
copy=self.copy,
dtype=[np.float64, np.float32],
force_writeable=True,
reset=first_pass,
)
n_samples, n_features = X.shape
if first_pass:
self.components_ = None
if self.n_components is None:
if self.components_ is None:
self.n_components_ = min(n_samples, n_features)
else:
self.n_components_ = self.components_.shape[0]
elif not self.n_components <= n_features:
raise ValueError(
"n_components=%r invalid for n_features=%d, need "
"more rows than columns for IncrementalPCA "
"processing" % (self.n_components, n_features)
)
elif not self.n_components <= n_samples:
raise ValueError(
"n_components=%r must be less or equal to "
"the batch number of samples "
"%d." % (self.n_components, n_samples)
)
else:
self.n_components_ = self.n_components
if (self.components_ is not None) and (
self.components_.shape[0] != self.n_components_
):
raise ValueError(
"Number of input features has changed from %i "
"to %i between calls to partial_fit! Try "
"setting n_components to a fixed value."
% (self.components_.shape[0], self.n_components_)
)
# This is the first partial_fit
if not hasattr(self, "n_samples_seen_"):
self.n_samples_seen_ = 0
self.mean_ = 0.0
self.var_ = 0.0
# Update stats - they are 0 if this is the first step
col_mean, col_var, n_total_samples = _incremental_mean_and_var(
X,
last_mean=self.mean_,
last_variance=self.var_,
last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
)
n_total_samples = n_total_samples[0]
# Whitening
if self.n_samples_seen_ == 0:
# If it is the first step, simply whiten X
X -= col_mean
else:
col_batch_mean = np.mean(X, axis=0)
X -= col_batch_mean
# Build matrix of combined previous basis and new data
mean_correction = np.sqrt(
(self.n_samples_seen_ / n_total_samples) * n_samples
) * (self.mean_ - col_batch_mean)
X = np.vstack(
(
self.singular_values_.reshape((-1, 1)) * self.components_,
X,
mean_correction,
)
)
U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
U, Vt = svd_flip(U, Vt, u_based_decision=False)
explained_variance = S**2 / (n_total_samples - 1)
explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)
self.n_samples_seen_ = n_total_samples
self.components_ = Vt[: self.n_components_]
self.singular_values_ = S[: self.n_components_]
self.mean_ = col_mean
self.var_ = col_var
self.explained_variance_ = explained_variance[: self.n_components_]
self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
# we already checked `self.n_components <= n_samples` above
if self.n_components_ not in (n_samples, n_features):
self.noise_variance_ = explained_variance[self.n_components_ :].mean()
else:
self.noise_variance_ = 0.0
return self
| IncrementalPCA.partial_fit |
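A sketch of the streaming pattern partial_fit is meant for, feeding one in-memory chunk at a time (editorial addition; in practice the chunks would come from disk or a generator).
import numpy as np
from sklearn.decomposition import IncrementalPCA

ipca = IncrementalPCA(n_components=5)
rng = np.random.RandomState(0)
for _ in range(10):               # stand-in for chunks streamed from storage
    chunk = rng.rand(200, 20)
    ipca.partial_fit(chunk)
print(ipca.explained_variance_ratio_)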
scikit-learn | 67 | sklearn/ensemble/_iforest.py | def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_IsolationForest.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self, X, accept_sparse=["csc"], dtype=tree_dtype, ensure_all_finite=False
)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if isinstance(self.max_samples, str) and self.max_samples == "auto":
max_samples = min(256, n_samples)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # max_samples is float
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X,
y,
max_samples,
max_depth=max_depth,
sample_weight=sample_weight,
check_input=False,
)
self._average_path_length_per_tree, self._decision_path_lengths = zip(
*[
(
_average_path_length(tree.tree_.n_node_samples),
tree.tree_.compute_node_depths(),
)
for tree in self.estimators_
]
)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# Else, define offset_ wrt contamination parameter
# To avoid performing input validation a second time we call
# _score_samples rather than score_samples.
# _score_samples expects a CSR matrix, so we convert if necessary.
if issparse(X):
X = X.tocsr()
self.offset_ = np.percentile(self._score_samples(X), 100.0 * self.contamination)
return self
| IsolationForest.fit |
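A small sketch of fitting on data with a few injected anomalies and scoring them (editorial addition; the contamination value is illustrative).
import numpy as np
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(0)
X_inliers = rng.normal(size=(200, 2))
X_outliers = rng.uniform(low=-6, high=6, size=(10, 2))
X_all = np.vstack([X_inliers, X_outliers])

iso = IsolationForest(contamination=0.05, random_state=0).fit(X_all)
labels = iso.predict(X_all)        # +1 for inliers, -1 for outliers
scores = iso.score_samples(X_all)  # the lower, the more anomalous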
scikit-learn | 68 | sklearn/manifold/_isomap.py | def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
"""
| /usr/src/app/target_test_cases/failed_tests_Isomap.transform.txt | def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features)
If neighbors_algorithm='precomputed', X is assumed to be a
distance matrix or a sparse graph of shape
(n_queries, n_samples_fit).
Returns
-------
X_new : array-like, shape (n_queries, n_components)
X transformed in the new space.
"""
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
else:
distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
# Create the graph of shortest distances from X to
# training data via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
n_samples_fit = self.nbrs_.n_samples_fit_
n_queries = distances.shape[0]
if hasattr(X, "dtype") and X.dtype == np.float32:
dtype = np.float32
else:
dtype = np.float64
G_X = np.zeros((n_queries, n_samples_fit), dtype)
for i in range(n_queries):
G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| Isomap.transform |
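A short sketch of out-of-sample transform with Isomap; the S-curve data and split sizes below are assumptions chosen only for illustration.
import numpy as np
from sklearn.datasets import make_s_curve
from sklearn.manifold import Isomap

X, _ = make_s_curve(n_samples=300, random_state=0)
iso = Isomap(n_neighbors=10, n_components=2)
iso.fit(X[:250])                    # geodesic distances computed on the training split
X_new = iso.transform(X[250:])      # new points linked into the fitted neighbour graph
print(X_new.shape)                  # (50, 2)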
scikit-learn | 69 | sklearn/isotonic.py | def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
| /usr/src/app/target_test_cases/failed_tests_IsotonicRegression.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,) or (n_samples, 1)
Training data.
.. versionchanged:: 0.24
Also accepts 2d array with 1 feature.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
check_params = dict(accept_sparse=False, ensure_2d=False)
X = check_array(
X, input_name="X", dtype=[np.float64, np.float32], **check_params
)
y = check_array(y, input_name="y", dtype=X.dtype, **check_params)
check_consistent_length(X, y, sample_weight)
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self.X_thresholds_, self.y_thresholds_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
| IsotonicRegression.fit |
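A brief usage sketch for isotonic fitting; the noisy synthetic target and the out_of_bounds setting are illustrative choices, not part of the record.
import numpy as np
from sklearn.isotonic import IsotonicRegression

rng = np.random.RandomState(0)
X = np.arange(50, dtype=float)
y = X + 5.0 * rng.randn(50)                 # noisy but increasing target
ir = IsotonicRegression(out_of_bounds="clip")
ir.fit(X, y)                                # stores X_thresholds_ / y_thresholds_ for interpolation
print(ir.predict([0.5, 24.5, 80.0]))        # monotone fit, clipped outside the training range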
scikit-learn | 70 | sklearn/impute/_iterative.py | def fit_transform(self, X, y=None, **params):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
| /usr/src/app/target_test_cases/failed_tests_IterativeImputer.fit_transform.txt | def fit_transform(self, X, y=None, **params):
"""Fit the imputer on `X` and return the transformed `X`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
**params : dict
Parameters routed to the `fit` method of the sub-estimator via the
metadata routing API.
.. versionadded:: 1.5
Only available if
`sklearn.set_config(enable_metadata_routing=True)` is set. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
_raise_for_params(params, self, "fit")
routed_params = process_routing(
self,
"fit",
**params,
)
self.random_state_ = getattr(
self, "random_state_", check_random_state(self.random_state)
)
if self.estimator is None:
from ..linear_model import BayesianRidge
self._estimator = BayesianRidge()
else:
self._estimator = clone(self.estimator)
self.imputation_sequence_ = []
self.initial_imputer_ = None
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=True
)
super()._fit_indicator(complete_mask)
X_indicator = super()._transform_indicator(complete_mask)
if self.max_iter == 0 or np.all(mask_missing_values):
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
# Edge case: a single feature, we return the initial imputation.
if Xt.shape[1] == 1:
self.n_iter_ = 0
return super()._concatenate_indicator(Xt, X_indicator)
self._min_value = self._validate_limit(self.min_value, "min", X.shape[1])
self._max_value = self._validate_limit(self.max_value, "max", X.shape[1])
if not np.all(np.greater(self._max_value, self._min_value)):
raise ValueError("One (or more) features have min_value >= max_value.")
# order in which to impute
# note this is probably too slow for large feature data (d > 100000)
# and a better way would be good.
# see: https://goo.gl/KyCNwj and subsequent comments
ordered_idx = self._get_ordered_idx(mask_missing_values)
self.n_features_with_missing_ = len(ordered_idx)
abs_corr_mat = self._get_abs_corr_mat(Xt)
n_samples, n_features = Xt.shape
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
if not self.sample_posterior:
Xt_previous = Xt.copy()
normalized_tol = self.tol * np.max(np.abs(X[~mask_missing_values]))
for self.n_iter_ in range(1, self.max_iter + 1):
if self.imputation_order == "random":
ordered_idx = self._get_ordered_idx(mask_missing_values)
for feat_idx in ordered_idx:
neighbor_feat_idx = self._get_neighbor_feat_idx(
n_features, feat_idx, abs_corr_mat
)
Xt, estimator = self._impute_one_feature(
Xt,
mask_missing_values,
feat_idx,
neighbor_feat_idx,
estimator=None,
fit_mode=True,
params=routed_params.estimator.fit,
)
estimator_triplet = _ImputerTriplet(
feat_idx, neighbor_feat_idx, estimator
)
self.imputation_sequence_.append(estimator_triplet)
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (self.n_iter_, self.max_iter, time() - start_t)
)
if not self.sample_posterior:
inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None)
if self.verbose > 0:
print(
"[IterativeImputer] Change: {}, scaled tolerance: {} ".format(
inf_norm, normalized_tol
)
)
if inf_norm < normalized_tol:
if self.verbose > 0:
print("[IterativeImputer] Early stopping criterion reached.")
break
Xt_previous = Xt.copy()
else:
if not self.sample_posterior:
warnings.warn(
"[IterativeImputer] Early stopping criterion not reached.",
ConvergenceWarning,
)
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
| IterativeImputer.fit_transform |
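A minimal sketch of fit_transform on a tiny matrix with missing entries; the data and max_iter value are illustrative assumptions.
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0], [np.nan, 3.0], [7.0, np.nan]])
imp = IterativeImputer(max_iter=10, random_state=0)
print(imp.fit_transform(X))   # rounds of per-feature regressions until the tolerance is reached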
scikit-learn | 71 | sklearn/impute/_iterative.py | def transform(self, X):
"""Impute all missing values in `X`.
        Note that this is stochastic: if `random_state` is not fixed, repeated
        calls or permuted input will produce different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
| /usr/src/app/target_test_cases/failed_tests_IterativeImputer.transform.txt | def transform(self, X):
"""Impute all missing values in `X`.
        Note that this is stochastic: if `random_state` is not fixed, repeated
        calls or permuted input will produce different results.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : array-like, shape (n_samples, n_features)
The imputed input data.
"""
check_is_fitted(self)
X, Xt, mask_missing_values, complete_mask = self._initial_imputation(
X, in_fit=False
)
X_indicator = super()._transform_indicator(complete_mask)
if self.n_iter_ == 0 or np.all(mask_missing_values):
return super()._concatenate_indicator(Xt, X_indicator)
imputations_per_round = len(self.imputation_sequence_) // self.n_iter_
i_rnd = 0
if self.verbose > 0:
print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,))
start_t = time()
for it, estimator_triplet in enumerate(self.imputation_sequence_):
Xt, _ = self._impute_one_feature(
Xt,
mask_missing_values,
estimator_triplet.feat_idx,
estimator_triplet.neighbor_feat_idx,
estimator=estimator_triplet.estimator,
fit_mode=False,
)
if not (it + 1) % imputations_per_round:
if self.verbose > 1:
print(
"[IterativeImputer] Ending imputation round "
"%d/%d, elapsed time %0.2f"
% (i_rnd + 1, self.n_iter_, time() - start_t)
)
i_rnd += 1
_assign_where(Xt, X, cond=~mask_missing_values)
return super()._concatenate_indicator(Xt, X_indicator)
| IterativeImputer.transform |
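A sketch, under assumed toy data, of transforming new samples with an already fitted imputer.
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer

X_train = np.array([[1.0, 2.0], [3.0, 6.0], [4.0, 8.0], [np.nan, 3.0], [7.0, np.nan]])
imp = IterativeImputer(max_iter=10, random_state=0).fit(X_train)
X_test = np.array([[np.nan, 4.0], [6.0, np.nan]])
print(imp.transform(X_test))  # replays the stored imputation_sequence_ on the new data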
scikit-learn | 72 | sklearn/preprocessing/_discretization.py | def fit(self, X, y=None, sample_weight=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : ndarray of shape (n_samples,)
Contains weight values to be associated with each sample.
Cannot be used when `strategy` is set to `"uniform"`.
.. versionadded:: 1.3
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_KBinsDiscretizer.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""
Fit the estimator.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : ndarray of shape (n_samples,)
Contains weight values to be associated with each sample.
Cannot be used when `strategy` is set to `"uniform"`.
.. versionadded:: 1.3
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, dtype="numeric")
if self.dtype in (np.float64, np.float32):
output_dtype = self.dtype
else: # self.dtype is None
output_dtype = X.dtype
n_samples, n_features = X.shape
if sample_weight is not None and self.strategy == "uniform":
raise ValueError(
"`sample_weight` was provided but it cannot be "
"used with strategy='uniform'. Got strategy="
f"{self.strategy!r} instead."
)
if self.subsample is not None and n_samples > self.subsample:
# Take a subsample of `X`
X = resample(
X,
replace=False,
n_samples=self.subsample,
random_state=self.random_state,
)
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn(
"Feature %d is constant and will be replaced with 0." % jj
)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == "uniform":
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == "quantile":
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
if sample_weight is None:
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
else:
bin_edges[jj] = np.asarray(
[
_weighted_percentile(column, sample_weight, q)
for q in quantiles
],
dtype=np.float64,
)
elif self.strategy == "kmeans":
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(
column[:, None], sample_weight=sample_weight
).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ("quantile", "kmeans"):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn(
"Bins whose width are too small (i.e., <= "
"1e-8) in feature %d are removed. Consider "
"decreasing the number of bins." % jj
)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if "onehot" in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse_output=self.encode == "onehot",
dtype=output_dtype,
)
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_))))
return self
| KBinsDiscretizer.fit |
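A small sketch of fitting the discretizer and inspecting the learned edges; the data, bin count, and strategy are illustrative assumptions.
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[-2.0, 1.0], [-1.0, 2.0], [0.0, 3.0], [1.0, 4.0]])
est = KBinsDiscretizer(n_bins=3, encode="ordinal", strategy="uniform")
est.fit(X)
print(est.bin_edges_[0])   # per-feature edges learned during fit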
scikit-learn | 73 | sklearn/preprocessing/_discretization.py | def inverse_transform(self, X=None, *, Xt=None):
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Transformed data in the binned space.
Xt : array-like of shape (n_samples, n_features)
Transformed data in the binned space.
.. deprecated:: 1.5
`Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead.
Returns
-------
Xinv : ndarray, dtype={np.float32, np.float64}
Data in the original feature space.
"""
| /usr/src/app/target_test_cases/failed_tests_KBinsDiscretizer.inverse_transform.txt | def inverse_transform(self, X=None, *, Xt=None):
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Transformed data in the binned space.
Xt : array-like of shape (n_samples, n_features)
Transformed data in the binned space.
.. deprecated:: 1.5
`Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead.
Returns
-------
Xinv : ndarray, dtype={np.float32, np.float64}
Data in the original feature space.
"""
X = _deprecate_Xt_in_inverse_transform(X, Xt)
check_is_fitted(self)
if "onehot" in self.encode:
X = self._encoder.inverse_transform(X)
Xinv = check_array(X, copy=True, dtype=(np.float64, np.float32))
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError(
"Incorrect number of features. Expecting {}, received {}.".format(
n_features, Xinv.shape[1]
)
)
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[(Xinv[:, jj]).astype(np.int64)]
return Xinv
| KBinsDiscretizer.inverse_transform |
scikit-learn | 74 | sklearn/preprocessing/_discretization.py | def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
Data in the binned space. Will be a sparse matrix if
`self.encode='onehot'` and ndarray otherwise.
"""
| /usr/src/app/target_test_cases/failed_tests_KBinsDiscretizer.transform.txt | def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : {ndarray, sparse matrix}, dtype={np.float32, np.float64}
Data in the binned space. Will be a sparse matrix if
`self.encode='onehot'` and ndarray otherwise.
"""
check_is_fitted(self)
# check input and attribute dtypes
dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
Xt = validate_data(self, X, copy=True, dtype=dtype, reset=False)
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
Xt[:, jj] = np.searchsorted(bin_edges[jj][1:-1], Xt[:, jj], side="right")
if self.encode == "ordinal":
return Xt
dtype_init = None
if "onehot" in self.encode:
dtype_init = self._encoder.dtype
self._encoder.dtype = Xt.dtype
try:
Xt_enc = self._encoder.transform(Xt)
finally:
# revert the initial dtype to avoid modifying self.
self._encoder.dtype = dtype_init
return Xt_enc
| KBinsDiscretizer.transform |
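A sketch of the transform / inverse_transform round trip with assumed one-dimensional data; note the inverse returns bin centres, not the original values.
import numpy as np
from sklearn.preprocessing import KBinsDiscretizer

X = np.array([[-3.0], [-1.0], [0.5], [2.0], [4.0]])
est = KBinsDiscretizer(n_bins=4, encode="ordinal", strategy="uniform").fit(X)
Xt = est.transform(X)              # bin indices in [0, n_bins - 1]
print(Xt.ravel())
print(est.inverse_transform(Xt))   # bin centres, not the original values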
scikit-learn | 75 | sklearn/cluster/_kmeans.py | def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_KMeans.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Compute k-means clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory
copy if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
copy=self.copy_x,
accept_large_sparse=False,
)
self._check_params_vs_input(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
# Validate init array
init = self.init
init_is_array_like = _is_arraylike_not_scalar(init)
if init_is_array_like:
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if init_is_array_like:
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if self._algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
kmeans_single = _kmeans_single_lloyd
self._check_mkl_vcomp(X, X.shape[0])
best_inertia, best_labels = None, None
for i in range(self._n_init):
# Initialize centers
centers_init = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
sample_weight=sample_weight,
)
if self.verbose:
print("Initialization complete")
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X,
sample_weight,
centers_init,
max_iter=self.max_iter,
verbose=self.verbose,
tol=self._tol,
n_threads=self._n_threads,
)
# determine if these results are the best so far
# we chose a new run if it has a better inertia and the clustering is
# different from the best so far (it's possible that the inertia is
# slightly better even if the clustering is the same with potentially
# permuted labels, due to rounding errors)
if best_inertia is None or (
inertia < best_inertia
and not _is_same_clustering(labels, best_labels, self.n_clusters)
):
best_labels = labels
best_centers = centers
best_inertia = inertia
best_n_iter = n_iter_
if not sp.issparse(X):
if not self.copy_x:
X += X_mean
best_centers += X_mean
distinct_clusters = len(set(best_labels))
if distinct_clusters < self.n_clusters:
warnings.warn(
"Number of distinct clusters ({}) found smaller than "
"n_clusters ({}). Possibly due to duplicate points "
"in X.".format(distinct_clusters, self.n_clusters),
ConvergenceWarning,
stacklevel=2,
)
self.cluster_centers_ = best_centers
self._n_features_out = self.cluster_centers_.shape[0]
self.labels_ = best_labels
self.inertia_ = best_inertia
self.n_iter_ = best_n_iter
return self
| KMeans.fit |
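A minimal usage sketch for k-means fitting; the six toy points and n_init value are illustrative assumptions.
import numpy as np
from sklearn.cluster import KMeans

X = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [8.0, 8.0], [1.0, 0.6], [9.0, 11.0]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.cluster_centers_)        # best centers over the n_init restarts
print(km.labels_, km.inertia_)    # cluster assignments and within-cluster sum of squares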
scikit-learn | 76 | sklearn/impute/_knn.py | def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
"""
| /usr/src/app/target_test_cases/failed_tests_KNNImputer.fit.txt | def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
The fitted `KNNImputer` class instance.
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
self._valid_mask = ~np.all(self._mask_fit_X, axis=0)
super()._fit_indicator(self._mask_fit_X)
return self
| KNNImputer.fit |
scikit-learn | 77 | sklearn/impute/_knn.py | def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
            that are not always missing during `fit`.
"""
| /usr/src/app/target_test_cases/failed_tests_KNNImputer.transform.txt | def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
            that are not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
ensure_all_finite = True
else:
ensure_all_finite = "allow-nan"
X = validate_data(
self,
X,
accept_sparse=False,
dtype=FLOAT_DTYPES,
force_writeable=True,
ensure_all_finite=ensure_all_finite,
copy=self.copy,
reset=False,
)
mask = _get_mask(X, self.missing_values)
mask_fit_X = self._mask_fit_X
valid_mask = self._valid_mask
X_indicator = super()._transform_indicator(mask)
# Removes columns where the training data is all nan
if not np.any(mask[:, valid_mask]):
# No missing values in X
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
# Even if there are no missing values in X, we still concatenate Xc
# with the missing value indicator matrix, X_indicator.
# This is to ensure that the output maintains consistency in terms
# of columns, regardless of whether missing values exist in X or not.
return super()._concatenate_indicator(Xc, X_indicator)
row_missing_idx = np.flatnonzero(mask[:, valid_mask].any(axis=1))
non_missing_fix_X = np.logical_not(mask_fit_X)
# Maps from indices from X to indices in dist matrix
dist_idx_map = np.zeros(X.shape[0], dtype=int)
dist_idx_map[row_missing_idx] = np.arange(row_missing_idx.shape[0])
def process_chunk(dist_chunk, start):
row_missing_chunk = row_missing_idx[start : start + len(dist_chunk)]
# Find and impute missing by column
for col in range(X.shape[1]):
if not valid_mask[col]:
# column was all missing during training
continue
col_mask = mask[row_missing_chunk, col]
if not np.any(col_mask):
# column has no missing values
continue
(potential_donors_idx,) = np.nonzero(non_missing_fix_X[:, col])
# receivers_idx are indices in X
receivers_idx = row_missing_chunk[np.flatnonzero(col_mask)]
# distances for samples that needed imputation for column
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
# receivers with all nan distances impute with mean
all_nan_dist_mask = np.isnan(dist_subset).all(axis=1)
all_nan_receivers_idx = receivers_idx[all_nan_dist_mask]
if all_nan_receivers_idx.size:
col_mean = np.ma.array(
self._fit_X[:, col], mask=mask_fit_X[:, col]
).mean()
X[all_nan_receivers_idx, col] = col_mean
if len(all_nan_receivers_idx) == len(receivers_idx):
# all receivers imputed with mean
continue
# receivers with at least one defined distance
receivers_idx = receivers_idx[~all_nan_dist_mask]
dist_subset = dist_chunk[dist_idx_map[receivers_idx] - start][
:, potential_donors_idx
]
n_neighbors = min(self.n_neighbors, len(potential_donors_idx))
value = self._calc_impute(
dist_subset,
n_neighbors,
self._fit_X[potential_donors_idx, col],
mask_fit_X[potential_donors_idx, col],
)
X[receivers_idx, col] = value
# process in fixed-memory chunks
gen = pairwise_distances_chunked(
X[row_missing_idx, :],
self._fit_X,
metric=self.metric,
missing_values=self.missing_values,
ensure_all_finite=ensure_all_finite,
reduce_func=process_chunk,
)
for chunk in gen:
# process_chunk modifies X in place. No return value.
pass
if self.keep_empty_features:
Xc = X
Xc[:, ~valid_mask] = 0
else:
Xc = X[:, valid_mask]
return super()._concatenate_indicator(Xc, X_indicator)
| KNNImputer.transform |
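A brief sketch of fitting on complete data and imputing new samples; the toy arrays and n_neighbors value are assumptions.
import numpy as np
from sklearn.impute import KNNImputer

X_train = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [8.0, 8.0]])
imputer = KNNImputer(n_neighbors=2).fit(X_train)
X_test = np.array([[np.nan, 3.0], [7.0, np.nan]])
print(imputer.transform(X_test))   # each hole filled from its 2 nearest donors in the training data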
scikit-learn | 78 | sklearn/neighbors/_classification.py | def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
| /usr/src/app/target_test_cases/failed_tests_KNeighborsClassifier.predict.txt | def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
check_is_fitted(self, "_fit_method")
if self.weights == "uniform":
if self._fit_method == "brute" and ArgKminClassMode.is_usable_for(
X, self._fit_X, self.metric
):
probabilities = self.predict_proba(X)
if self.outputs_2d_:
return np.stack(
[
self.classes_[idx][np.argmax(probas, axis=1)]
for idx, probas in enumerate(probabilities)
],
axis=1,
)
return self.classes_[np.argmax(probabilities, axis=1)]
# In that case, we do not need the distances to perform
# the weighting so we do not compute them.
neigh_ind = self.kneighbors(X, return_distance=False)
neigh_dist = None
else:
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = _num_samples(X)
weights = _get_weights(neigh_dist, self.weights)
if weights is not None and _all_with_any_reduction_axis_1(weights, value=0):
raise ValueError(
"All neighbors of some sample is getting zero weights. "
"Please modify 'weights' to avoid this case if you are "
"using a user-defined function."
)
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = _mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
| KNeighborsClassifier.predict |
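A minimal prediction sketch using the iris dataset as an assumed example input; the split and weighting scheme are illustrative choices.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
knn = KNeighborsClassifier(n_neighbors=5, weights="distance").fit(X_tr, y_tr)
print(knn.predict(X_te[:5]))       # distance-weighted vote among the 5 nearest neighbours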
scikit-learn | 79 | sklearn/neighbors/_regression.py | def predict(self, X):
"""Predict the target for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int
Target values.
"""
| /usr/src/app/target_test_cases/failed_tests_KNeighborsRegressor.predict.txt | def predict(self, X):
"""Predict the target for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int
Target values.
"""
if self.weights == "uniform":
# In that case, we do not need the distances to perform
# the weighting so we do not compute them.
neigh_ind = self.kneighbors(X, return_distance=False)
neigh_dist = None
else:
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| KNeighborsRegressor.predict |
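A short regression sketch on an assumed noisy sine curve, showing the plain-mean prediction path of the method above.
import numpy as np
from sklearn.neighbors import KNeighborsRegressor

rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(40, 1), axis=0)
y = np.sin(X).ravel()
reg = KNeighborsRegressor(n_neighbors=5, weights="uniform").fit(X, y)
print(reg.predict([[2.5]]))        # unweighted mean of the 5 neighbours' targets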
scikit-learn | 80 | sklearn/neighbors/_kde.py | def fit(self, X, y=None, sample_weight=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : array-like of shape (n_samples,), default=None
List of sample weights attached to the data X.
.. versionadded:: 0.20
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_KernelDensity.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : array-like of shape (n_samples,), default=None
List of sample weights attached to the data X.
.. versionadded:: 0.20
Returns
-------
self : object
Returns the instance itself.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
if isinstance(self.bandwidth, str):
if self.bandwidth == "scott":
self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
elif self.bandwidth == "silverman":
self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
-1 / (X.shape[1] + 4)
)
else:
self.bandwidth_ = self.bandwidth
X = validate_data(self, X, order="C", dtype=np.float64)
if sample_weight is not None:
sample_weight = _check_sample_weight(
sample_weight, X, dtype=np.float64, ensure_non_negative=True
)
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](
X,
metric=self.metric,
leaf_size=self.leaf_size,
sample_weight=sample_weight,
**kwargs,
)
return self
| KernelDensity.fit |
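A sketch of fitting with a data-driven bandwidth; the normal sample and the "scott" rule are illustrative assumptions.
import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.normal(loc=0.0, scale=1.0, size=(500, 1))
kde = KernelDensity(kernel="gaussian", bandwidth="scott").fit(X)
print(kde.bandwidth_)              # bandwidth chosen from the data during fit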
scikit-learn | 81 | sklearn/neighbors/_kde.py | def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Currently, this is implemented only for gaussian and tophat kernels.
Parameters
----------
n_samples : int, default=1
Number of samples to generate.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to generate
random samples. Pass an int for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array-like of shape (n_samples, n_features)
List of samples.
"""
| /usr/src/app/target_test_cases/failed_tests_KernelDensity.sample.txt | def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Currently, this is implemented only for gaussian and tophat kernels.
Parameters
----------
n_samples : int, default=1
Number of samples to generate.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to generate
random samples. Pass an int for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
X : array-like of shape (n_samples, n_features)
List of samples.
"""
check_is_fitted(self)
# TODO: implement sampling for other valid kernel shapes
if self.kernel not in ["gaussian", "tophat"]:
raise NotImplementedError()
data = np.asarray(self.tree_.data)
rng = check_random_state(random_state)
u = rng.uniform(0, 1, size=n_samples)
if self.tree_.sample_weight is None:
i = (u * data.shape[0]).astype(np.int64)
else:
cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
sum_weight = cumsum_weight[-1]
i = np.searchsorted(cumsum_weight, u * sum_weight)
if self.kernel == "gaussian":
return np.atleast_2d(rng.normal(data[i], self.bandwidth_))
elif self.kernel == "tophat":
# we first draw points from a d-dimensional normal distribution,
# then use an incomplete gamma function to map them to a uniform
# d-dimensional tophat distribution.
dim = data.shape[1]
X = rng.normal(size=(n_samples, dim))
s_sq = row_norms(X, squared=True)
correction = (
gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
* self.bandwidth_
/ np.sqrt(s_sq)
)
return data[i] + X * correction[:, np.newaxis]
| KernelDensity.sample |
scikit-learn | 82 | sklearn/neighbors/_kde.py | def score_samples(self, X):
"""Compute the log-likelihood of each sample under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray of shape (n_samples,)
Log-likelihood of each sample in `X`. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
| /usr/src/app/target_test_cases/failed_tests_KernelDensity.score_samples.txt | def score_samples(self, X):
"""Compute the log-likelihood of each sample under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray of shape (n_samples,)
Log-likelihood of each sample in `X`. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
check_is_fitted(self)
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = validate_data(self, X, order="C", dtype=np.float64, reset=False)
if self.tree_.sample_weight is None:
N = self.tree_.data.shape[0]
else:
N = self.tree_.sum_weight
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X,
h=self.bandwidth_,
kernel=self.kernel,
atol=atol_N,
rtol=self.rtol,
breadth_first=self.breadth_first,
return_log=True,
)
log_density -= np.log(N)
return log_density
| KernelDensity.score_samples |
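A combined sketch of score_samples and sample on assumed one-dimensional data; the bandwidth value is an arbitrary illustrative choice.
import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.normal(size=(300, 1))
kde = KernelDensity(kernel="gaussian", bandwidth=0.5).fit(X)
print(kde.score_samples(np.array([[0.0], [3.0]])))  # log-densities, higher near the mode
print(kde.sample(3, random_state=0))                # new draws from the fitted density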
scikit-learn | 83 | sklearn/preprocessing/_label.py | def fit(self, y):
"""Fit label binarizer.
Parameters
----------
y : ndarray of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_LabelBinarizer.fit.txt | def fit(self, y):
"""Fit label binarizer.
Parameters
----------
y : ndarray of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : object
Returns the instance itself.
"""
if self.neg_label >= self.pos_label:
raise ValueError(
f"neg_label={self.neg_label} must be strictly less than "
f"pos_label={self.pos_label}."
)
if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):
raise ValueError(
"Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
f"pos_label={self.pos_label} and neg_label={self.neg_label}"
)
self.y_type_ = type_of_target(y, input_name="y")
if "multioutput" in self.y_type_:
raise ValueError(
"Multioutput target data is not supported with label binarization"
)
if _num_samples(y) == 0:
raise ValueError("y has 0 samples: %r" % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
| LabelBinarizer.fit |
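A minimal fit/transform sketch; the string labels below are assumed toy input.
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer()
lb.fit(["spam", "ham", "eggs", "spam"])
print(lb.classes_)                      # sorted unique labels
print(lb.transform(["ham", "spam"]))    # one row per sample, one column per class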
scikit-learn | 84 | sklearn/preprocessing/_label.py | def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels.
Parameters
----------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float, default=None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of :term:`decision_function`
(classifier).
Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : {ndarray, sparse matrix} of shape (n_samples,)
Target values. Sparse matrix will be of CSR format.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), :meth:`inverse_transform` chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's :term:`decision_function` method directly as the input
of :meth:`inverse_transform`.
"""
| /usr/src/app/target_test_cases/failed_tests_LabelBinarizer.inverse_transform.txt | def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels.
Parameters
----------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float, default=None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of :term:`decision_function`
(classifier).
Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : {ndarray, sparse matrix} of shape (n_samples,)
Target values. Sparse matrix will be of CSR format.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), :meth:`inverse_transform` chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's :term:`decision_function` method directly as the input
of :meth:`inverse_transform`.
"""
check_is_fitted(self)
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.0
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(
Y, self.y_type_, self.classes_, threshold
)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
| LabelBinarizer.inverse_transform |
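A sketch of recovering class labels from continuous scores; the score matrix is an assumed stand-in for predict_proba output.
import numpy as np
from sklearn.preprocessing import LabelBinarizer

lb = LabelBinarizer().fit([0, 1, 2])
scores = np.array([[0.2, 0.5, 0.3], [0.1, 0.1, 0.8]])   # assumed probabilistic scores
print(lb.inverse_transform(scores))  # multiclass: the class with the greatest value wins;
                                     # the threshold argument only matters for binary/multilabel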
scikit-learn | 85 | sklearn/preprocessing/_label.py | def transform(self, y):
"""Transform multi-class labels to binary labels.
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : {array, sparse matrix} of shape (n_samples,) or \
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix
will be of CSR format.
"""
| /usr/src/app/target_test_cases/failed_tests_LabelBinarizer.transform.txt | def transform(self, y):
"""Transform multi-class labels to binary labels.
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : {array, sparse matrix} of shape (n_samples,) or \
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix
will be of CSR format.
"""
check_is_fitted(self)
y_is_multilabel = type_of_target(y).startswith("multilabel")
if y_is_multilabel and not self.y_type_.startswith("multilabel"):
raise ValueError("The object was not fitted with multilabel input.")
return label_binarize(
y,
classes=self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output,
)
| LabelBinarizer.transform |
scikit-learn | 86 | sklearn/preprocessing/_label.py | def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y : ndarray of shape (n_samples,)
Original encoding.
"""
| /usr/src/app/target_test_cases/failed_tests_LabelEncoder.inverse_transform.txt | def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y : ndarray of shape (n_samples,)
Original encoding.
"""
check_is_fitted(self)
xp, _ = get_namespace(y)
y = column_or_1d(y, warn=True)
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return xp.asarray([])
diff = _setdiff1d(
ar1=y,
ar2=xp.arange(self.classes_.shape[0], device=device(y)),
xp=xp,
)
if diff.shape[0]:
raise ValueError("y contains previously unseen labels: %s" % str(diff))
y = xp.asarray(y)
return xp.take(self.classes_, y, axis=0)
| LabelEncoder.inverse_transform |
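A short round-trip sketch with assumed city-name labels.
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(["paris", "tokyo", "amsterdam"])
codes = le.transform(["tokyo", "paris"])
print(codes)                        # integer codes into the sorted classes_
print(le.inverse_transform(codes))  # back to ['tokyo' 'paris']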
scikit-learn | 87 | sklearn/linear_model/_least_angle.py | def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
Returns an instance of self.
"""
| /usr/src/app/target_test_cases/failed_tests_LassoLarsIC.fit.txt | def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
Returns an instance of self.
"""
if copy_X is None:
copy_X = self.copy_X
X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True)
X, y, Xmean, ymean, Xstd = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=copy_X
)
Gram = self.precompute
alphas_, _, coef_path_, self.n_iter_ = lars_path(
X,
y,
Gram=Gram,
copy_X=copy_X,
copy_Gram=True,
alpha_min=0.0,
method="lasso",
verbose=self.verbose,
max_iter=self.max_iter,
eps=self.eps,
return_n_iter=True,
positive=self.positive,
)
n_samples = X.shape[0]
if self.criterion == "aic":
criterion_factor = 2
elif self.criterion == "bic":
criterion_factor = log(n_samples)
else:
raise ValueError(
f"criterion should be either bic or aic, got {self.criterion!r}"
)
residuals = y[:, np.newaxis] - np.dot(X, coef_path_)
residuals_sum_squares = np.sum(residuals**2, axis=0)
degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int)
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
degrees_of_freedom[k] = np.sum(mask)
self.alphas_ = alphas_
if self.noise_variance is None:
self.noise_variance_ = self._estimate_noise_variance(
X, y, positive=self.positive
)
else:
self.noise_variance_ = self.noise_variance
self.criterion_ = (
n_samples * np.log(2 * np.pi * self.noise_variance_)
+ residuals_sum_squares / self.noise_variance_
+ criterion_factor * degrees_of_freedom
)
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| LassoLarsIC.fit |
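A brief sketch of criterion-based model selection; the diabetes dataset and BIC choice are illustrative assumptions.
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LassoLarsIC

X, y = load_diabetes(return_X_y=True)
reg = LassoLarsIC(criterion="bic").fit(X, y)
print(reg.alpha_)                   # alpha minimising the BIC along the LARS path
print((reg.coef_ != 0).sum())       # number of selected features at that alpha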
scikit-learn | 88 | sklearn/decomposition/_lda.py | def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), \
default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
| /usr/src/app/target_test_cases/failed_tests_LatentDirichletAllocation._perplexity_precomp_distr.txt | def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), \
default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_components = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError(
"Number of samples in X and doc_topic_distr do not match."
)
if n_components != self.n_components:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
| LatentDirichletAllocation._perplexity_precomp_distr |
scikit-learn | 89 | sklearn/decomposition/_lda.py | def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_LatentDirichletAllocation.fit.txt | def fit(self, X, y=None):
"""Learn model for the data X with variational Bayes method.
When `learning_method` is 'online', use mini-batch update.
Otherwise, use batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
X = self._check_non_neg_array(
X, reset_n_features=True, whom="LatentDirichletAllocation.fit"
)
n_samples, n_features = X.shape
max_iter = self.max_iter
evaluate_every = self.evaluate_every
learning_method = self.learning_method
batch_size = self.batch_size
# initialize parameters
self._init_latent_vars(n_features, dtype=X.dtype)
# change to perplexity later
last_bound = None
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for i in range(max_iter):
if learning_method == "online":
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(
X[idx_slice, :],
total_samples=n_samples,
batch_update=False,
parallel=parallel,
)
else:
# batch update
self._em_step(
X, total_samples=n_samples, batch_update=True, parallel=parallel
)
# check perplexity
if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
doc_topics_distr, _ = self._e_step(
X, cal_sstats=False, random_init=False, parallel=parallel
)
bound = self._perplexity_precomp_distr(
X, doc_topics_distr, sub_sampling=False
)
if self.verbose:
print(
"iteration: %d of max_iter: %d, perplexity: %.4f"
% (i + 1, max_iter, bound)
)
if last_bound and abs(last_bound - bound) < self.perp_tol:
break
last_bound = bound
elif self.verbose:
print("iteration: %d of max_iter: %d" % (i + 1, max_iter))
self.n_iter_ += 1
# calculate final perplexity value on train set
doc_topics_distr, _ = self._e_step(
X, cal_sstats=False, random_init=False, parallel=parallel
)
self.bound_ = self._perplexity_precomp_distr(
X, doc_topics_distr, sub_sampling=False
)
return self
| LatentDirichletAllocation.fit |
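A tiny topic-model sketch; the four toy documents and n_components=2 are assumptions for illustration only.
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

docs = ["apples and oranges", "oranges and bananas", "cars and trucks", "trucks and buses"]
X = CountVectorizer().fit_transform(docs)
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(X)
print(lda.transform(X).round(2))    # per-document topic mixtures, rows sum to 1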
scikit-learn | 90 | sklearn/decomposition/_lda.py | def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Partially fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_LatentDirichletAllocation.partial_fit.txt | def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Partially fitted estimator.
"""
first_time = not hasattr(self, "components_")
X = self._check_non_neg_array(
X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit"
)
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if first_time:
self._init_latent_vars(n_features, dtype=X.dtype)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d."
% (n_features, self.components_.shape[1])
)
n_jobs = effective_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(
X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel,
)
return self
| LatentDirichletAllocation.partial_fit |
scikit-learn | 91 | sklearn/decomposition/_lda.py | def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
"""
| /usr/src/app/target_test_cases/failed_tests_LatentDirichletAllocation.score.txt | def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False, whom="LatentDirichletAllocation.score"
)
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
| LatentDirichletAllocation.score |
scikit-learn | 92 | sklearn/decomposition/_lda.py | def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
`doc_topic_distr` is now normalized.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
| /usr/src/app/target_test_cases/failed_tests_LatentDirichletAllocation.transform.txt | def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
`doc_topic_distr` is now normalized.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False, whom="LatentDirichletAllocation.transform"
)
doc_topic_distr = self._unnormalized_transform(X)
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
| LatentDirichletAllocation.transform |
scikit-learn | 93 | sklearn/covariance/_shrunk_covariance.py | def fit(self, X, y=None):
"""Fit the Ledoit-Wolf shrunk covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_LedoitWolf.fit.txt | def fit(self, X, y=None):
"""Fit the Ledoit-Wolf shrunk covariance model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
# Not calling the parent object to fit, to avoid computing the
# covariance matrix (and potentially the precision)
X = validate_data(self, X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance, shrinkage = _ledoit_wolf(
X - self.location_, assume_centered=True, block_size=self.block_size
)
self.shrinkage_ = shrinkage
self._set_covariance(covariance)
return self
| LedoitWolf.fit |
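A minimal sketch of shrunk covariance estimation; the bivariate normal sample below is an assumed example.
import numpy as np
from sklearn.covariance import LedoitWolf

rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0.0, 0.0], cov=[[0.4, 0.2], [0.2, 0.8]], size=60)
lw = LedoitWolf().fit(X)
print(lw.shrinkage_)        # data-driven shrinkage intensity
print(lw.covariance_)       # shrunk covariance estimate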
scikit-learn | 94 | sklearn/discriminant_analysis.py | def fit(self, X, y):
"""Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
`store_covariance` and `tol` has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearDiscriminantAnalysis.fit.txt | def fit(self, X, y):
"""Fit the Linear Discriminant Analysis model.
.. versionchanged:: 0.19
`store_covariance` and `tol` have been moved to the main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
xp, _ = get_namespace(X)
X, y = validate_data(
self, X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32]
)
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = self.classes_.shape[0]
if n_samples == n_classes:
raise ValueError(
"The number of samples must be more than the number of classes."
)
if self.priors is None: # estimate priors from sample
_, cnts = xp.unique_counts(y) # non-negative ints
self.priors_ = xp.astype(cnts, X.dtype) / float(y.shape[0])
else:
self.priors_ = xp.asarray(self.priors, dtype=X.dtype)
if xp.any(self.priors_ < 0):
raise ValueError("priors must be non-negative")
if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5:
warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(n_classes - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == "svd":
if self.shrinkage is not None:
raise NotImplementedError("shrinkage not supported with 'svd' solver.")
if self.covariance_estimator is not None:
raise ValueError(
"covariance estimator "
"is not supported "
"with svd solver. Try another solver"
)
self._solve_svd(X, y)
elif self.solver == "lsqr":
self._solve_lstsq(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
elif self.solver == "eigen":
self._solve_eigen(
X,
y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator,
)
if size(self.classes_) == 2: # treat binary case as a special case
coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype)
self.coef_ = xp.reshape(coef_, (1, -1))
intercept_ = xp.asarray(
self.intercept_[1] - self.intercept_[0], dtype=X.dtype
)
self.intercept_ = xp.reshape(intercept_, (1,))
self._n_features_out = self._max_components
return self
| LinearDiscriminantAnalysis.fit |
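Hypothetical usage sketch for the fit above; the iris dataset and the chosen parameters are assumptions of this example, not taken from the row:

from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)
lda = LinearDiscriminantAnalysis(solver="svd", n_components=2).fit(X, y)
print(lda.classes_)                   # the three class labels
print(lda.explained_variance_ratio_)  # available for the 'svd' and 'eigen' solvers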
scikit-learn | 95 | sklearn/discriminant_analysis.py | def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearDiscriminantAnalysis.predict_log_proba.txt | def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
xp, _ = get_namespace(X)
prediction = self.predict_proba(X)
info = xp.finfo(prediction.dtype)
if hasattr(info, "smallest_normal"):
smallest_normal = info.smallest_normal
else:
# smallest_normal was introduced in NumPy 1.22
smallest_normal = info.tiny
prediction[prediction == 0.0] += smallest_normal
return xp.log(prediction)
| LinearDiscriminantAnalysis.predict_log_proba |
scikit-learn | 96 | sklearn/discriminant_analysis.py | def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearDiscriminantAnalysis.predict_proba.txt | def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
xp, is_array_api_compliant = get_namespace(X)
decision = self.decision_function(X)
if size(self.classes_) == 2:
proba = _expit(decision, xp)
return xp.stack([1 - proba, proba], axis=1)
else:
return softmax(decision)
| LinearDiscriminantAnalysis.predict_proba |
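A small, made-up example of `predict_proba` on a binary problem; columns follow `classes_` and each row sums to 1:

from sklearn.datasets import make_classification
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = make_classification(n_samples=50, n_features=4, random_state=0)
clf = LinearDiscriminantAnalysis().fit(X, y)
proba = clf.predict_proba(X[:3])
print(proba.shape)        # (3, 2)
print(proba.sum(axis=1))  # all ones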
scikit-learn | 97 | sklearn/discriminant_analysis.py | def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components) or \
(n_samples, min(rank, n_components))
Transformed data. In the case of the 'svd' solver, the shape
is (n_samples, min(rank, n_components)).
"""
| /usr/src/app/target_test_cases/failed_tests_LinearDiscriminantAnalysis.transform.txt | def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components) or \
(n_samples, min(rank, n_components))
Transformed data. In the case of the 'svd' solver, the shape
is (n_samples, min(rank, n_components)).
"""
if self.solver == "lsqr":
raise NotImplementedError(
"transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')."
)
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(self, X, reset=False)
if self.solver == "svd":
X_new = (X - self.xbar_) @ self.scalings_
elif self.solver == "eigen":
X_new = X @ self.scalings_
return X_new[:, : self._max_components]
| LinearDiscriminantAnalysis.transform |
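Hypothetical sketch of `transform` used for supervised dimensionality reduction; with three classes the projection has at most n_classes - 1 = 2 components:

from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X, y = load_iris(return_X_y=True)
X_new = LinearDiscriminantAnalysis(solver="svd").fit(X, y).transform(X)
print(X_new.shape)  # (150, 2)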
scikit-learn | 98 | sklearn/linear_model/_linear_loss.py | def gradient(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Computes the gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient.txt | def gradient(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Computes the gradient w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
grad_pointwise = self.base_loss.gradient(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
grad_pointwise /= sw_sum
if not self.base_loss.is_multiclass:
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
return grad
else:
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
# gradient.shape = (n_samples, n_classes)
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
if coef.ndim == 1:
return grad.ravel(order="F")
else:
return grad
| LinearModelLoss.gradient |
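A sketch of calling `gradient` directly. Note that `LinearModelLoss` and `HalfBinomialLoss` live in private modules (`sklearn.linear_model._linear_loss` and `sklearn._loss.loss`), so the import paths below are an assumption that may change between versions:

import numpy as np
from sklearn._loss.loss import HalfBinomialLoss
from sklearn.linear_model._linear_loss import LinearModelLoss

rng = np.random.RandomState(0)
X = rng.normal(size=(30, 4))
y = rng.randint(0, 2, size=30).astype(np.float64)  # binary targets as floats

loss_obj = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=True)
coef = loss_obj.init_zero_coef(X)                  # shape (n_features + 1,)
grad = loss_obj.gradient(coef, X, y, l2_reg_strength=1.0)
print(grad.shape)                                  # same shape as coef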
scikit-learn | 99 | sklearn/linear_model/_linear_loss.py | def gradient_hessian(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
gradient_out=None,
hessian_out=None,
raw_prediction=None,
):
"""Computes gradient and hessian w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
gradient_out : None or ndarray of shape coef.shape
A location into which the gradient is stored. If None, a new array
might be created.
hessian_out : None or ndarray
A location into which the hessian is stored. If None, a new array
might be created.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessian : ndarray
Hessian matrix.
hessian_warning : bool
True if more than 25% of the pointwise hessian elements are non-positive.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient_hessian.txt | def gradient_hessian(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
gradient_out=None,
hessian_out=None,
raw_prediction=None,
):
"""Computes gradient and hessian w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
gradient_out : None or ndarray of shape coef.shape
A location into which the gradient is stored. If None, a new array
might be created.
hessian_out : None or ndarray
A location into which the hessian is stored. If None, a new array
might be created.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessian : ndarray
Hessian matrix.
hessian_warning : bool
True if more than 25% of the pointwise hessian elements are non-positive.
"""
n_samples, n_features = X.shape
n_dof = n_features + int(self.fit_intercept)
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
# For non-canonical link functions and far away from the optimum, the pointwise
# hessian can be negative. We take care that 75% of the hessian entries are
# positive.
hessian_warning = np.mean(hess_pointwise <= 0) > 0.25
hess_pointwise = np.abs(hess_pointwise)
if not self.base_loss.is_multiclass:
# gradient
if gradient_out is None:
grad = np.empty_like(coef, dtype=weights.dtype)
else:
grad = gradient_out
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
# hessian
if hessian_out is None:
hess = np.empty(shape=(n_dof, n_dof), dtype=weights.dtype)
else:
hess = hessian_out
if hessian_warning:
# Exit early without computing the hessian.
return grad, hess, hessian_warning
# TODO: This "sandwich product", X' diag(W) X, is the main computational
# bottleneck for solvers. A dedicated Cython routine might improve it
# by exploiting the symmetry (as opposed to, e.g., BLAS gemm).
if sparse.issparse(X):
hess[:n_features, :n_features] = (
X.T
@ sparse.dia_matrix(
(hess_pointwise, 0), shape=(n_samples, n_samples)
)
@ X
).toarray()
else:
# np.einsum may use less memory but the following, using BLAS matrix
# multiplication (gemm), is by far faster.
WX = hess_pointwise[:, None] * X
hess[:n_features, :n_features] = np.dot(X.T, WX)
if l2_reg_strength > 0:
# The L2 penalty enters the Hessian on the diagonal only. To add those
# terms, we use a flattened view on the array.
hess.reshape(-1)[
: (n_features * n_dof) : (n_dof + 1)
] += l2_reg_strength
if self.fit_intercept:
# With intercept included as added column to X, the hessian becomes
# hess = (X, 1)' @ diag(h) @ (X, 1)
# = (X' @ diag(h) @ X, X' @ h)
# ( h @ X, sum(h))
# The left upper part has already been filled, it remains to compute
# the last row and the last column.
Xh = X.T @ hess_pointwise
hess[:-1, -1] = Xh
hess[-1, :-1] = Xh
hess[-1, -1] = hess_pointwise.sum()
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
raise NotImplementedError
return grad, hess, hessian_warning
| LinearModelLoss.gradient_hessian |
scikit-learn | 100 | sklearn/linear_model/_linear_loss.py | def gradient_hessian_product(
self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes a vector input of the same shape as the gradient
and returns the matrix-vector product with the hessian.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.gradient_hessian_product.txt | def gradient_hessian_product(
self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1
):
"""Computes gradient and hessp (hessian product function) w.r.t. coef.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
Returns
-------
gradient : ndarray of shape coef.shape
The gradient of the loss.
hessp : callable
Function that takes a vector input of the same shape as the gradient
and returns the matrix-vector product with the hessian.
"""
(n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes
n_dof = n_features + int(self.fit_intercept)
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
sw_sum = n_samples if sample_weight is None else np.sum(sample_weight)
if not self.base_loss.is_multiclass:
grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
hess_pointwise /= sw_sum
grad = np.empty_like(coef, dtype=weights.dtype)
grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights
if self.fit_intercept:
grad[-1] = grad_pointwise.sum()
# Precompute as much as possible: hX, hX_sum and hessian_sum
hessian_sum = hess_pointwise.sum()
if sparse.issparse(X):
hX = (
sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples))
@ X
)
else:
hX = hess_pointwise[:, np.newaxis] * X
if self.fit_intercept:
# Calculate the double derivative with respect to intercept.
# Note: In case hX is sparse, hX.sum is a matrix object.
hX_sum = np.squeeze(np.asarray(hX.sum(axis=0)))
# prevent squeezing to zero-dim array if n_features == 1
hX_sum = np.atleast_1d(hX_sum)
# With intercept included and l2_reg_strength = 0, hessp returns
# res = (X, 1)' @ diag(h) @ (X, 1) @ s
# = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1])
# res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1]
# res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1]
def hessp(s):
ret = np.empty_like(s)
if sparse.issparse(X):
ret[:n_features] = X.T @ (hX @ s[:n_features])
else:
ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]])
ret[:n_features] += l2_reg_strength * s[:n_features]
if self.fit_intercept:
ret[:n_features] += s[-1] * hX_sum
ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1]
return ret
else:
# Here we may safely assume HalfMultinomialLoss aka categorical
# cross-entropy.
# HalfMultinomialLoss computes only the diagonal part of the hessian, i.e.
# diagonal in the classes. Here, we want the matrix-vector product of the
# full hessian. Therefore, we call gradient_proba.
grad_pointwise, proba = self.base_loss.gradient_proba(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=sample_weight,
n_threads=n_threads,
)
grad_pointwise /= sw_sum
grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights
if self.fit_intercept:
grad[:, -1] = grad_pointwise.sum(axis=0)
# Full hessian-vector product, i.e. not only the diagonal part of the
# hessian. Derivation with some index battle for input vector s:
# - sample index i
# - feature indices j, m
# - class indices k, l
# - 1_{k=l} is one if k=l else 0
# - p_i_k is the (predicted) probability that sample i belongs to class k
# for all i: sum_k p_i_k = 1
# - s_l_m is input vector for class l and feature m
# - X' = X transposed
#
# Note: Hessian with dropping most indices is just:
# X' @ p_k (1(k=l) - p_l) @ X
#
# result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m
# = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l)
# * X_{im} s_l_m
# = sum_{i, m} (X')_{ji} * p_i_k
# * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m)
#
# See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa
def hessp(s):
s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof)
if self.fit_intercept:
s_intercept = s[:, -1]
s = s[:, :-1] # shape = (n_classes, n_features)
else:
s_intercept = 0
tmp = X @ s.T + s_intercept # X_{im} * s_k_m
tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l ..
tmp *= proba # * p_i_k
if sample_weight is not None:
tmp *= sample_weight[:, np.newaxis]
# hess_prod = empty_like(grad), but we ravel grad below and this
# function is run after that.
hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F")
hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s
if self.fit_intercept:
hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum
if coef.ndim == 1:
return hess_prod.ravel(order="F")
else:
return hess_prod
if coef.ndim == 1:
return grad.ravel(order="F"), hessp
return grad, hessp
| LinearModelLoss.gradient_hessian_product |
scikit-learn | 101 | sklearn/linear_model/_linear_loss.py | def init_zero_coef(self, X, dtype=None):
"""Allocate coef of correct shape with zeros.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
dtype : data-type, default=None
Overrides the data type of coef. With dtype=None, coef will have the same
dtype as X.
Returns
-------
coef : ndarray of shape (n_dof,) or (n_classes, n_dof)
Coefficients of a linear model.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.init_zero_coef.txt | def init_zero_coef(self, X, dtype=None):
"""Allocate coef of correct shape with zeros.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
dtype : data-type, default=None
Overrides the data type of coef. With dtype=None, coef will have the same
dtype as X.
Returns
-------
coef : ndarray of shape (n_dof,) or (n_classes, n_dof)
Coefficients of a linear model.
"""
n_features = X.shape[1]
n_classes = self.base_loss.n_classes
if self.fit_intercept:
n_dof = n_features + 1
else:
n_dof = n_features
if self.base_loss.is_multiclass:
coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F")
else:
coef = np.zeros_like(X, shape=n_dof, dtype=dtype)
return coef
| LinearModelLoss.init_zero_coef |
scikit-learn | 102 | sklearn/linear_model/_linear_loss.py | def loss(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Compute the loss as weighted average over point-wise losses.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
loss : float
Weighted average of losses per sample, plus penalty.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearModelLoss.loss.txt | def loss(
self,
coef,
X,
y,
sample_weight=None,
l2_reg_strength=0.0,
n_threads=1,
raw_prediction=None,
):
"""Compute the loss as weighted average over point-wise losses.
Parameters
----------
coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
Coefficients of a linear model.
If shape (n_classes * n_dof,), the classes of one feature are contiguous,
i.e. one reconstructs the 2d-array via
coef.reshape((n_classes, -1), order="F").
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : contiguous array of shape (n_samples,)
Observed, true target values.
sample_weight : None or contiguous array of shape (n_samples,), default=None
Sample weights.
l2_reg_strength : float, default=0.0
L2 regularization strength
n_threads : int, default=1
Number of OpenMP threads to use.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space). If provided, these are used. If
None, then raw_prediction = X @ coef + intercept is calculated.
Returns
-------
loss : float
Weighted average of losses per sample, plus penalty.
"""
if raw_prediction is None:
weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X)
else:
weights, intercept = self.weight_intercept(coef)
loss = self.base_loss.loss(
y_true=y,
raw_prediction=raw_prediction,
sample_weight=None,
n_threads=n_threads,
)
loss = np.average(loss, weights=sample_weight)
return loss + self.l2_penalty(weights, l2_reg_strength)
| LinearModelLoss.loss |
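Companion sketch for `loss`, again relying on the private helpers named above (an assumption, not a public API):

import numpy as np
from sklearn._loss.loss import HalfBinomialLoss
from sklearn.linear_model._linear_loss import LinearModelLoss

rng = np.random.RandomState(0)
X = rng.normal(size=(30, 4))
y = rng.randint(0, 2, size=30).astype(np.float64)

loss_obj = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=False)
coef = rng.normal(size=X.shape[1])
print(loss_obj.loss(coef, X, y))                       # unpenalized average loss
print(loss_obj.loss(coef, X, y, l2_reg_strength=1.0))  # adds the L2 penalty term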
scikit-learn | 103 | sklearn/linear_model/_base.py | def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : object
Fitted Estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearRegression.fit.txt | def fit(self, X, y, sample_weight=None):
"""
Fit linear model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample.
.. versionadded:: 0.17
parameter *sample_weight* support to LinearRegression.
Returns
-------
self : object
Fitted Estimator.
"""
n_jobs_ = self.n_jobs
accept_sparse = False if self.positive else ["csr", "csc", "coo"]
X, y = validate_data(
self,
X,
y,
accept_sparse=accept_sparse,
y_numeric=True,
multi_output=True,
force_writeable=True,
)
has_sw = sample_weight is not None
if has_sw:
sample_weight = _check_sample_weight(
sample_weight, X, dtype=X.dtype, ensure_non_negative=True
)
# Note that neither _rescale_data nor the rest of the fit method of
# LinearRegression can benefit from in-place operations when X is a
# sparse matrix. Therefore, let's not copy X when it is sparse.
copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X,
y,
fit_intercept=self.fit_intercept,
copy=copy_X_in_preprocess_data,
sample_weight=sample_weight,
)
if has_sw:
# Sample weight can be implemented via a simple rescaling. Note
# that we safely do inplace rescaling when _preprocess_data has
# already made a copy if requested.
X, y, sample_weight_sqrt = _rescale_data(
X, y, sample_weight, inplace=copy_X_in_preprocess_data
)
if self.positive:
if y.ndim < 2:
self.coef_ = optimize.nnls(X, y)[0]
else:
# scipy.optimize.nnls cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
)
self.coef_ = np.vstack([out[0] for out in outs])
elif sp.issparse(X):
X_offset_scale = X_offset / X_scale
if has_sw:
def matvec(b):
return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
else:
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * b.sum()
X_centered = sparse.linalg.LinearOperator(
shape=X.shape, matvec=matvec, rmatvec=rmatvec
)
if y.ndim < 2:
self.coef_ = lsqr(X_centered, y)[0]
else:
# sparse_lstsq cannot handle y with shape (M, K)
outs = Parallel(n_jobs=n_jobs_)(
delayed(lsqr)(X_centered, y[:, j].ravel())
for j in range(y.shape[1])
)
self.coef_ = np.vstack([out[0] for out in outs])
else:
self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y)
self.coef_ = self.coef_.T
if y.ndim == 1:
self.coef_ = np.ravel(self.coef_)
self._set_intercept(X_offset, y_offset, X_scale)
return self
| LinearRegression.fit |
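Illustrative sketch of `fit` with sample weights and with `positive=True` (which routes to scipy.optimize.nnls, as in the branch above); the toy data is made up:

import numpy as np
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.0, 2.0, -1.0]) + 0.5
w = rng.uniform(0.5, 2.0, size=50)

reg = LinearRegression().fit(X, y, sample_weight=w)
print(reg.coef_, reg.intercept_)

reg_pos = LinearRegression(positive=True).fit(X, y)
print(reg_pos.coef_)  # constrained to be non-negative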
scikit-learn | 104 | sklearn/svm/_classes.py | def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_LinearSVC.fit.txt | def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
)
check_classification_targets(y)
self.classes_ = np.unique(y)
_dual = _validate_dual_parameter(
self.dual, self.loss, self.penalty, self.multi_class, X
)
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X,
y,
self.C,
self.fit_intercept,
self.intercept_scaling,
self.class_weight,
self.penalty,
_dual,
self.verbose,
self.max_iter,
self.tol,
self.random_state,
self.multi_class,
self.loss,
sample_weight=sample_weight,
)
# Backward compatibility: _fit_liblinear is used both by LinearSVC/R
# and LogisticRegression but LogisticRegression sets a structured
# `n_iter_` attribute with information about the underlying OvR fits
# while LinearSVC/R only reports the maximum value.
self.n_iter_ = n_iter_.max().item()
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
| LinearSVC.fit |
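A small usage sketch for `LinearSVC.fit`; `dual="auto"` is assumed to be available (recent scikit-learn releases), otherwise drop that argument:

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=100, n_features=5, random_state=0)
clf = LinearSVC(C=1.0, dual="auto", random_state=0).fit(X, y)
print(clf.coef_.shape, clf.intercept_.shape)  # (1, 5) (1,) in the binary case
print(clf.n_iter_)                            # maximum liblinear iteration count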
scikit-learn | 105 | sklearn/neighbors/_lof.py | def fit(self, X, y=None):
"""Fit the local outlier factor detector from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : LocalOutlierFactor
The fitted local outlier factor detector.
"""
| /usr/src/app/target_test_cases/failed_tests_LocalOutlierFactor.fit.txt | def fit(self, X, y=None):
"""Fit the local outlier factor detector from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : LocalOutlierFactor
The fitted local outlier factor detector.
"""
self._fit(X)
n_samples = self.n_samples_fit_
if self.n_neighbors > n_samples:
warnings.warn(
"n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples)
)
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
n_neighbors=self.n_neighbors_
)
if self._fit_X.dtype == np.float32:
self._distances_fit_X_ = self._distances_fit_X_.astype(
self._fit_X.dtype,
copy=False,
)
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_
)
# Compute lof score over training samples to define offset_:
lrd_ratios_array = (
self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
)
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == "auto":
# inliers score around -1 (the higher, the less abnormal).
self.offset_ = -1.5
else:
self.offset_ = np.percentile(
self.negative_outlier_factor_, 100.0 * self.contamination
)
# Verify if negative_outlier_factor_ values are within acceptable range.
# Novelty must also be false to detect outliers
if np.min(self.negative_outlier_factor_) < -1e7 and not self.novelty:
warnings.warn(
"Duplicate values are leading to incorrect results. "
"Increase the number of neighbors for more accurate results."
)
return self
| LocalOutlierFactor.fit |
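Illustrative sketch of fitting the detector and inspecting `negative_outlier_factor_`; the toy data with one planted outlier is an assumption of the example:

import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(size=(100, 2)), [[8.0, 8.0]]])  # last point is an outlier
lof = LocalOutlierFactor(n_neighbors=20)
labels = lof.fit_predict(X)              # fit + outlier labels in one call
print(labels[-1])                        # -1 flags the planted outlier
print(lof.negative_outlier_factor_[-1])  # much more negative than for inliers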
scikit-learn | 106 | sklearn/neighbors/_lof.py | def score_samples(self, X):
"""Opposite of the Local Outlier Factor of X.
It is the opposite as bigger is better, i.e. large values correspond
to inliers.
**Only available for novelty detection (when novelty is set to True).**
The argument X is supposed to contain *new data*: if X contains a
point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point. Because of this, the scores obtained via ``score_samples`` may
differ from the standard LOF scores.
The standard LOF scores for the training data are available via the
``negative_outlier_factor_`` attribute.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : ndarray of shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
| /usr/src/app/target_test_cases/failed_tests_LocalOutlierFactor.score_samples.txt | def score_samples(self, X):
"""Opposite of the Local Outlier Factor of X.
It is the opposite as bigger is better, i.e. large values correspond
to inliers.
**Only available for novelty detection (when novelty is set to True).**
The argument X is supposed to contain *new data*: if X contains a
point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point. Because of this, the scores obtained via ``score_samples`` may
differ from the standard LOF scores.
The standard LOF scores for the training data are available via the
``negative_outlier_factor_`` attribute.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : ndarray of shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse="csr")
distances_X, neighbors_indices_X = self.kneighbors(
X, n_neighbors=self.n_neighbors_
)
if X.dtype == np.float32:
distances_X = distances_X.astype(X.dtype, copy=False)
X_lrd = self._local_reachability_density(
distances_X,
neighbors_indices_X,
)
lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
| LocalOutlierFactor.score_samples |
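Hypothetical sketch of `score_samples` in novelty mode (only available with `novelty=True`); the training cloud and query points are made up:

import numpy as np
from sklearn.neighbors import LocalOutlierFactor

rng = np.random.RandomState(0)
X_train = rng.normal(size=(100, 2))
lof = LocalOutlierFactor(n_neighbors=20, novelty=True).fit(X_train)

X_new = np.array([[0.0, 0.0], [6.0, 6.0]])
print(lof.score_samples(X_new))  # the far-away point gets a much lower score
print(lof.predict(X_new))        # typically [ 1 -1 ]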
scikit-learn | 107 | sklearn/linear_model/_logistic.py | def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function
and normalize these values across all the classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
| /usr/src/app/target_test_cases/failed_tests_LogisticRegression.predict_proba.txt | def predict_proba(self, X):
"""
Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function
and normalize these values across all the classes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Vector to be scored, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
T : array-like of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
check_is_fitted(self)
ovr = self.multi_class in ["ovr", "warn"] or (
self.multi_class in ["auto", "deprecated"]
and (
self.classes_.size <= 2
or self.solver in ("liblinear", "newton-cholesky")
)
)
if ovr:
return super()._predict_proba_lr(X)
else:
decision = self.decision_function(X)
if decision.ndim == 1:
# Workaround for multi_class="multinomial" and binary outcomes
# which requires softmax prediction with only a 1D decision.
decision_2d = np.c_[-decision, decision]
else:
decision_2d = decision
return softmax(decision_2d, copy=False)
| LogisticRegression.predict_proba |
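Short illustrative sketch on the iris dataset (the dataset choice and `max_iter` value are assumptions of this example):

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=1000).fit(X, y)
proba = clf.predict_proba(X[:2])
print(proba.shape)        # (2, 3), columns ordered as in clf.classes_
print(proba.sum(axis=1))  # rows sum to 1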
scikit-learn | 108 | sklearn/neural_network/_multilayer_perceptron.py | def partial_fit(self, X, y, classes=None):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : array-like of shape (n_samples,)
The target values.
classes : array of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : object
Trained MLP model.
"""
| /usr/src/app/target_test_cases/failed_tests_MLPClassifier.partial_fit.txt | def partial_fit(self, X, y, classes=None):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : array-like of shape (n_samples,)
The target values.
classes : array of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : object
Trained MLP model.
"""
if _check_partial_fit_first_call(self, classes):
self._label_binarizer = LabelBinarizer()
if type_of_target(y).startswith("multilabel"):
self._label_binarizer.fit(y)
else:
self._label_binarizer.fit(classes)
return self._fit(X, y, incremental=True)
| MLPClassifier.partial_fit |
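Sketch of incremental training with `partial_fit`; passing `classes` is required on the first call and may be omitted afterwards. The synthetic data and batch size are assumptions of the example:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.neural_network import MLPClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
classes = np.unique(y)

clf = MLPClassifier(hidden_layer_sizes=(20,), random_state=0)
for start in range(0, len(X), 50):
    batch = slice(start, start + 50)
    clf.partial_fit(X[batch], y[batch], classes=classes)
print(clf.score(X, y))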
scikit-learn | 109 | sklearn/neural_network/_multilayer_perceptron.py | def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
| /usr/src/app/target_test_cases/failed_tests_MLPClassifier.predict_proba.txt | def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
| MLPClassifier.predict_proba |
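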
scikit-learn | 110 | sklearn/cluster/_mean_shift.py | def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
"""
| /usr/src/app/target_test_cases/failed_tests_MeanShift.fit.txt | def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
"""
X = validate_data(self, X)
bandwidth = self.bandwidth
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
seeds = self.seeds
if seeds is None:
if self.bin_seeding:
seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
# We use n_jobs=1 because this will be used in nested calls under
# parallel calls to _mean_shift_single_seed so there is no need
# for further parallelism.
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=self.n_jobs)(
delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
for seed in seeds
)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i][1]: # i.e. len(points_within) > 0
center_intensity_dict[all_res[i][0]] = all_res[i][1]
self.n_iter_ = max([x[2] for x in all_res])
if not center_intensity_dict:
# nothing near seeds
raise ValueError(
"No point was within bandwidth=%f of any seed. Try a different seeding"
" strategy or increase the bandwidth."
% bandwidth
)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(
center_intensity_dict.items(),
key=lambda tup: (tup[1], tup[0]),
reverse=True,
)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
sorted_centers
)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
0
]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if self.cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
self.cluster_centers_, self.labels_ = cluster_centers, labels
return self
| MeanShift.fit |
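Illustrative clustering sketch; calling `estimate_bandwidth` explicitly mirrors what `fit` does internally when `bandwidth=None`, and the two-blob toy data is made up:

import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(loc=0.0, size=(50, 2)), rng.normal(loc=5.0, size=(50, 2))])
bandwidth = estimate_bandwidth(X, quantile=0.2)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
print(ms.cluster_centers_)    # roughly [0, 0] and [5, 5]
print(np.unique(ms.labels_))  # two clusters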
scikit-learn | 111 | sklearn/covariance/_robust_covariance.py | def fit(self, X, y=None):
"""Fit a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_MinCovDet.fit.txt | def fit(self, X, y=None):
"""Fit a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, ensure_min_samples=2, estimator="MinCovDet")
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn(
"The covariance matrix associated to your dataset is not full rank"
)
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X,
support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state,
)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(
X[raw_support], assume_centered=True
)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
| MinCovDet.fit |
scikit-learn | 112 | sklearn/decomposition/_dict_learning.py | def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_MiniBatchDictionaryLearning.fit.txt | def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(
self, X, dtype=[np.float64, np.float32], order="C", copy=False
)
self._check_params(X)
self._random_state = check_random_state(self.random_state)
dictionary = self._initialize_dict(X, self._random_state)
old_dict = dictionary.copy()
if self.shuffle:
X_train = X.copy()
self._random_state.shuffle(X_train)
else:
X_train = X
n_samples, n_features = X_train.shape
if self.verbose:
print("[dict_learning]")
# Inner stats
self._A = np.zeros(
(self._n_components, self._n_components), dtype=X_train.dtype
)
self._B = np.zeros((n_features, self._n_components), dtype=X_train.dtype)
# TODO(1.6): remove in 1.6
if self.max_iter is None:
warn(
(
"`max_iter=None` is deprecated in version 1.4 and will be removed"
" in version 1.6. Use the default value (i.e. `1_000`) instead."
),
FutureWarning,
)
max_iter = 1_000
else:
max_iter = self.max_iter
# Attributes to monitor the convergence
self._ewa_cost = None
self._ewa_cost_min = None
self._no_improvement = 0
batches = gen_batches(n_samples, self._batch_size)
batches = itertools.cycle(batches)
n_steps_per_iter = int(np.ceil(n_samples / self._batch_size))
n_steps = max_iter * n_steps_per_iter
i = -1 # to allow max_iter = 0
for i, batch in zip(range(n_steps), batches):
X_batch = X_train[batch]
batch_cost = self._minibatch_step(
X_batch, dictionary, self._random_state, i
)
if self._check_convergence(
X_batch, batch_cost, dictionary, old_dict, n_samples, i, n_steps
):
break
# XXX callback param added for backward compat in #18975 but a common
# unified callback API should be preferred
if self.callback is not None:
self.callback(locals())
old_dict[:] = dictionary
self.n_steps_ = i + 1
self.n_iter_ = np.ceil(self.n_steps_ / n_steps_per_iter)
self.components_ = dictionary
return self
| MiniBatchDictionaryLearning.fit |
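Minimal usage sketch for the fit above (toy data; the parameter values are illustrative assumptions):

import numpy as np
from sklearn.decomposition import MiniBatchDictionaryLearning

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 16))
dico = MiniBatchDictionaryLearning(
    n_components=8, batch_size=32, max_iter=20, random_state=0
).fit(X)
print(dico.components_.shape)  # (8, 16) learned dictionary atoms
code = dico.transform(X[:5])   # sparse codes for a few samples
print(code.shape)              # (5, 8)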
scikit-learn | 113 | sklearn/decomposition/_dict_learning.py | def partial_fit(self, X, y=None):
"""Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Return the instance itself.
"""
| /usr/src/app/target_test_cases/failed_tests_MiniBatchDictionaryLearning.partial_fit.txt | def partial_fit(self, X, y=None):
"""Update the model using the data in X as a mini-batch.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Return the instance itself.
"""
has_components = hasattr(self, "components_")
X = validate_data(
self, X, dtype=[np.float64, np.float32], order="C", reset=not has_components
)
if not has_components:
# This instance has not been fitted yet (fit or partial_fit)
self._check_params(X)
self._random_state = check_random_state(self.random_state)
dictionary = self._initialize_dict(X, self._random_state)
self.n_steps_ = 0
self._A = np.zeros((self._n_components, self._n_components), dtype=X.dtype)
self._B = np.zeros((X.shape[1], self._n_components), dtype=X.dtype)
else:
dictionary = self.components_
self._minibatch_step(X, dictionary, self._random_state, self.n_steps_)
self.components_ = dictionary
self.n_steps_ += 1
return self
| MiniBatchDictionaryLearning.partial_fit |
scikit-learn | 114 | sklearn/cluster/_kmeans.py | def fit(self, X, y=None, sample_weight=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_MiniBatchKMeans.fit.txt | def fit(self, X, y=None, sample_weight=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
.. versionadded:: 0.20
Returns
-------
self : object
Fitted estimator.
"""
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
)
self._check_params_vs_input(X)
random_state = check_random_state(self.random_state)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._n_threads = _openmp_effective_n_threads()
n_samples, n_features = X.shape
# Validate init array
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, self._batch_size)
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
# Validation set for the init
validation_indices = random_state.randint(0, n_samples, self._init_size)
X_valid = X[validation_indices]
sample_weight_valid = sample_weight[validation_indices]
# perform several inits with random subsets
best_inertia = None
for init_idx in range(self._n_init):
if self.verbose:
print(f"Init {init_idx + 1}/{self._n_init} with method {init}")
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans.
cluster_centers = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=random_state,
init_size=self._init_size,
sample_weight=sample_weight,
)
# Compute inertia on a validation set.
_, inertia = _labels_inertia_threadpool_limit(
X_valid,
sample_weight_valid,
cluster_centers,
n_threads=self._n_threads,
)
if self.verbose:
print(f"Inertia for init {init_idx + 1}/{self._n_init}: {inertia}")
if best_inertia is None or inertia < best_inertia:
init_centers = cluster_centers
best_inertia = inertia
centers = init_centers
centers_new = np.empty_like(centers)
# Initialize counts
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
# Attributes to monitor the convergence
self._ewa_inertia = None
self._ewa_inertia_min = None
self._no_improvement = 0
# Initialize number of samples seen since last reassignment
self._n_since_last_reassign = 0
n_steps = (self.max_iter * n_samples) // self._batch_size
with _get_threadpool_controller().limit(limits=1, user_api="blas"):
# Perform the iterative optimization until convergence
for i in range(n_steps):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(0, n_samples, self._batch_size)
# Perform the actual update step on the minibatch data
batch_inertia = _mini_batch_step(
X=X[minibatch_indices],
sample_weight=sample_weight[minibatch_indices],
centers=centers,
centers_new=centers_new,
weight_sums=self._counts,
random_state=random_state,
random_reassign=self._random_reassign(),
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose,
n_threads=self._n_threads,
)
if self._tol > 0.0:
centers_squared_diff = np.sum((centers_new - centers) ** 2)
else:
centers_squared_diff = 0
centers, centers_new = centers_new, centers
# Monitor convergence and do early stopping if necessary
if self._mini_batch_convergence(
i, n_steps, n_samples, centers_squared_diff, batch_inertia
):
break
self.cluster_centers_ = centers
self._n_features_out = self.cluster_centers_.shape[0]
self.n_steps_ = i + 1
self.n_iter_ = int(np.ceil(((i + 1) * self._batch_size) / n_samples))
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
)
else:
self.inertia_ = self._ewa_inertia * n_samples
return self
| MiniBatchKMeans.fit |
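Illustrative sketch with three well-separated blobs; the toy data is made up and `n_init="auto"` assumes a recent scikit-learn release:

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(loc=c, size=(200, 2)) for c in (0.0, 5.0, 10.0)])
mbk = MiniBatchKMeans(n_clusters=3, batch_size=64, n_init="auto", random_state=0).fit(X)
print(mbk.cluster_centers_)
print(mbk.inertia_, mbk.n_iter_)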
scikit-learn | 115 | sklearn/cluster/_kmeans.py | def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_MiniBatchKMeans.partial_fit.txt | def partial_fit(self, X, y=None, sample_weight=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training instances to cluster. It must be noted that the data
will be converted to C ordering, which will cause a memory copy
if the given data is not C-contiguous.
If a sparse matrix is passed, a copy will be made if it's not in
CSR format.
y : Ignored
Not used, present here for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
The weights for each observation in X. If None, all observations
are assigned equal weight. `sample_weight` is not used during
initialization if `init` is a callable or a user provided array.
Returns
-------
self : object
Return updated estimator.
"""
has_centers = hasattr(self, "cluster_centers_")
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
reset=not has_centers,
)
self._random_state = getattr(
self, "_random_state", check_random_state(self.random_state)
)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.n_steps_ = getattr(self, "n_steps_", 0)
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
if not has_centers:
# this instance has not been fitted yet (fit or partial_fit)
self._check_params_vs_input(X)
self._n_threads = _openmp_effective_n_threads()
# Validate init array
init = self.init
if _is_arraylike_not_scalar(init):
init = check_array(init, dtype=X.dtype, copy=True, order="C")
self._validate_center_shape(X, init)
self._check_mkl_vcomp(X, X.shape[0])
# initialize the cluster centers
self.cluster_centers_ = self._init_centroids(
X,
x_squared_norms=x_squared_norms,
init=init,
random_state=self._random_state,
init_size=self._init_size,
sample_weight=sample_weight,
)
# Initialize counts
self._counts = np.zeros(self.n_clusters, dtype=X.dtype)
# Initialize number of samples seen since last reassignment
self._n_since_last_reassign = 0
with _get_threadpool_controller().limit(limits=1, user_api="blas"):
_mini_batch_step(
X,
sample_weight=sample_weight,
centers=self.cluster_centers_,
centers_new=self.cluster_centers_,
weight_sums=self._counts,
random_state=self._random_state,
random_reassign=self._random_reassign(),
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose,
n_threads=self._n_threads,
)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia_threadpool_limit(
X,
sample_weight,
self.cluster_centers_,
n_threads=self._n_threads,
)
self.n_steps_ += 1
self._n_features_out = self.cluster_centers_.shape[0]
return self
| MiniBatchKMeans.partial_fit |
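The partial_fit entry above supports out-of-core training: each call processes one mini-batch, and the first call initialises the centroids. A hedged sketch with a simulated chunked stream (all data and sizes below are assumptions):

import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
mbk = MiniBatchKMeans(n_clusters=3, random_state=0)

for _ in range(20):            # pretend the data arrives in 20 chunks
    X_chunk = rng.rand(100, 4)
    mbk.partial_fit(X_chunk)   # the first call initialises cluster_centers_

print(mbk.n_steps_)                 # one step per partial_fit call -> 20
print(mbk.predict(rng.rand(5, 4)))  # cluster labels for new samples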
scikit-learn | 116 | sklearn/impute/_base.py | def transform(self, X):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
| /usr/src/app/target_test_cases/failed_tests_MissingIndicator.transform.txt | def transform(self, X):
"""Generate missing values indicator for `X`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of `Xt`
will be boolean.
"""
check_is_fitted(self)
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=False)
else:
if not (hasattr(X, "dtype") and X.dtype.kind == "b"):
raise ValueError("precomputed is True but the input data is not a mask")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if self.error_on_new and features_diff_fit_trans.size > 0:
raise ValueError(
"The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans)
)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
| MissingIndicator.transform |
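A small sketch of the transform entry above: with the default features="missing-only", the returned mask only covers the columns that had missing values during fit, and a column that is newly missing at transform time raises an error. The arrays below are made up for illustration:

import numpy as np
from sklearn.impute import MissingIndicator

X_train = np.array([[1.0, np.nan, 3.0],
                    [4.0, 5.0, np.nan]])
X_test = np.array([[6.0, np.nan, 8.0],
                   [9.0, 10.0, 11.0]])

indicator = MissingIndicator()   # features="missing-only" by default
indicator.fit(X_train)           # columns 1 and 2 contain missing values
mask = indicator.transform(X_test)

print(indicator.features_)       # [1 2]
print(mask)                      # boolean mask restricted to those columns:
                                 # [[ True False]
                                 #  [False False]]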
scikit-learn | 117 | sklearn/preprocessing/_label.py | def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
"""
| /usr/src/app/target_test_cases/failed_tests_MultiLabelBinarizer.fit_transform.txt | def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets.
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`
is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR
format.
"""
if self.classes is not None:
return self.fit(y).transform(y)
self._cached_dict = None
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
# ensure yt.indices keeps its current dtype
yt.indices = np.asarray(inverse[yt.indices], dtype=yt.indices.dtype)
if not self.sparse_output:
yt = yt.toarray()
return yt
| MultiLabelBinarizer.fit_transform |
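The fit_transform entry above builds the label vocabulary and the indicator matrix in a single pass. A minimal sketch with made-up label sets:

from sklearn.preprocessing import MultiLabelBinarizer

y = [{"sci-fi", "thriller"}, {"comedy"}, {"comedy", "thriller"}]

mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(y)   # columns follow the sorted classes_

print(mlb.classes_)        # ['comedy' 'sci-fi' 'thriller']
print(Y)
# [[0 1 1]
#  [1 0 0]
#  [1 0 1]]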
scikit-learn | 118 | sklearn/preprocessing/_label.py | def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets.
Parameters
----------
yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
| /usr/src/app/target_test_cases/failed_tests_MultiLabelBinarizer.inverse_transform.txt | def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets.
Parameters
----------
yt : {ndarray, sparse matrix} of shape (n_samples, n_classes)
        A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
check_is_fitted(self)
if yt.shape[1] != len(self.classes_):
raise ValueError(
"Expected indicator for {0} classes, but got {1}".format(
len(self.classes_), yt.shape[1]
)
)
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError("Expected only 0s and 1s in label indicator.")
return [
tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])
]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError(
"Expected only 0s and 1s in label indicator. Also got {0}".format(
unexpected
)
)
return [tuple(self.classes_.compress(indicators)) for indicators in yt]
| MultiLabelBinarizer.inverse_transform |
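A round-trip sketch for the inverse_transform entry above, including the validation branch that rejects values other than 0 and 1; the labels are made up:

import numpy as np
from sklearn.preprocessing import MultiLabelBinarizer

mlb = MultiLabelBinarizer()
Y = mlb.fit_transform([{"a", "b"}, {"b", "c"}, {"a"}])

print(mlb.inverse_transform(Y))   # [('a', 'b'), ('b', 'c'), ('a',)]

bad = np.array([[0, 2, 0], [1, 0, 0], [0, 0, 1]])
try:
    mlb.inverse_transform(bad)    # 2 is neither 0 nor 1
except ValueError as exc:
    print(exc)                    # "Expected only 0s and 1s in label indicator. ..."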
scikit-learn | 119 | sklearn/multioutput.py | def score(self, X, y):
"""Return the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples, n_outputs)
True values for X.
Returns
-------
scores : float
Mean accuracy of predicted target versus true target.
"""
| /usr/src/app/target_test_cases/failed_tests_MultiOutputClassifier.score.txt | def score(self, X, y):
"""Return the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples, n_outputs)
True values for X.
Returns
-------
scores : float
Mean accuracy of predicted target versus true target.
"""
check_is_fitted(self)
n_outputs_ = len(self.estimators_)
if y.ndim == 1:
raise ValueError(
"y must have at least two dimensions for "
"multi target classification but has only one"
)
if y.shape[1] != n_outputs_:
raise ValueError(
"The number of outputs of Y for fit {0} and"
" score {1} should be same".format(n_outputs_, y.shape[1])
)
y_pred = self.predict(X)
return np.mean(np.all(y == y_pred, axis=1))
| MultiOutputClassifier.score |
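As the entry above shows, score returns the exact-match (subset) accuracy: a sample only counts as correct when every output is predicted correctly. A hedged sketch; the dataset and base estimator are arbitrary choices, not implied by the record:

import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier

X, Y = make_multilabel_classification(n_samples=200, n_classes=3, random_state=0)

clf = MultiOutputClassifier(LogisticRegression(max_iter=1000)).fit(X, Y)

print(clf.score(X, Y))                               # subset accuracy
print(np.mean(np.all(clf.predict(X) == Y, axis=1)))  # same quantity computed by hand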
scikit-learn | 120 | sklearn/linear_model/_coordinate_descent.py | def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data.
y : ndarray of shape (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
| /usr/src/app/target_test_cases/failed_tests_MultiTaskElasticNet.fit.txt | def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data.
y : ndarray of shape (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# Need to validate separately here.
# We can't pass multi_output=True because that would allow y to be csr.
check_X_params = dict(
dtype=[np.float64, np.float32],
order="F",
force_writeable=True,
copy=self.copy_X and self.fit_intercept,
)
check_y_params = dict(ensure_2d=False, order="F")
X, y = validate_data(
self, X, y, validate_separately=(check_X_params, check_y_params)
)
check_consistent_length(X, y)
y = y.astype(X.dtype)
if hasattr(self, "l1_ratio"):
model_str = "ElasticNet"
else:
model_str = "Lasso"
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
n_targets = y.shape[1]
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, fit_intercept=self.fit_intercept, copy=False
)
if not self.warm_start or not hasattr(self, "coef_"):
self.coef_ = np.zeros(
(n_targets, n_features), dtype=X.dtype.type, order="F"
)
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
random = self.selection == "random"
(
self.coef_,
self.dual_gap_,
self.eps_,
self.n_iter_,
) = cd_fast.enet_coordinate_descent_multi_task(
self.coef_,
l1_reg,
l2_reg,
X,
y,
self.max_iter,
self.tol,
check_random_state(self.random_state),
random,
)
# account for different objective scaling here and in cd_fast
self.dual_gap_ /= n_samples
self._set_intercept(X_offset, y_offset, X_scale)
# return self for chaining fit and predict calls
return self
| MultiTaskElasticNet.fit |
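A minimal sketch of the MultiTaskElasticNet.fit entry above: y must be 2-D with one column per target, and the fitted coefficients come back as one row per target. The data and penalty values are illustrative assumptions:

import numpy as np
from sklearn.linear_model import MultiTaskElasticNet

rng = np.random.RandomState(0)
X = rng.randn(50, 10)
W = rng.randn(3, 10)                    # 3 targets sharing the same 10 features
Y = X @ W.T + 0.01 * rng.randn(50, 3)   # (n_samples, n_targets)

model = MultiTaskElasticNet(alpha=0.1, l1_ratio=0.5)
model.fit(X, Y)                         # a 1-D y would raise "For mono-task outputs, use ElasticNet"

print(model.coef_.shape)                # (3, 10): one coefficient row per target
print(model.intercept_.shape)           # (3,)
print(model.n_iter_, model.dual_gap_)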
scikit-learn | 121 | sklearn/neighbors/_nearest_centroid.py | def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
| /usr/src/app/target_test_cases/failed_tests_NearestCentroid.fit.txt | def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
Fitted estimator.
"""
        # If X is sparse and the metric is "manhattan", store it in CSC
        # format, which makes it easier to calculate the median.
if self.metric == "manhattan":
X, y = validate_data(self, X, y, accept_sparse=["csc"])
else:
X, y = validate_data(self, X, y, accept_sparse=["csr", "csc"])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d class"
% (n_classes)
)
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else: # metric == "euclidean"
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
if np.all(np.ptp(X, axis=0) == 0):
raise ValueError("All features have zero variance. Division by zero.")
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1.0 / nk) - (1.0 / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = (self.centroids_ - dataset_centroid_) / ms
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = np.abs(deviation) - self.shrink_threshold
np.clip(deviation, 0, None, out=deviation)
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
| NearestCentroid.fit |
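A short sketch for the NearestCentroid.fit entry above: fitting stores one centroid per class, and a positive shrink_threshold soft-thresholds the centroids toward the overall mean. The toy two-class data below is an assumption:

import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-1, -1], [-2, -1], [-3, -2],
              [1, 1], [2, 1], [3, 2]], dtype=float)
y = np.array([0, 0, 0, 1, 1, 1])

clf = NearestCentroid()               # plain per-class means, no shrinkage
clf.fit(X, y)
print(clf.centroids_)                 # one centroid per class
print(clf.predict([[-0.8, -1.0]]))    # [0]

clf_shrunk = NearestCentroid(shrink_threshold=0.2).fit(X, y)
print(clf_shrunk.centroids_)          # centroids pulled toward the global mean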