Skip to content

ENH Consistent checks for sample weights in linear models #15530

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 7 commits into from
Nov 15, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 11 additions & 11 deletions sklearn/linear_model/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
from ..utils.fixes import sparse_lsqr
from ..utils._seq_dataset import ArrayDataset32, CSRDataset32
from ..utils._seq_dataset import ArrayDataset64, CSRDataset64
from ..utils.validation import check_is_fitted
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..preprocessing import normalize as f_normalize

# TODO: bayesian_ridge_regression and bayesian_regression_ard
Expand Down Expand Up @@ -117,7 +117,6 @@ def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
This is here because nearly all linear models will want their data to be
centered. This function also systematically makes y consistent with X.dtype
"""

if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
Expand Down Expand Up @@ -183,7 +182,7 @@ def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True,
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = np.array(sample_weight)
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 0:
sample_weight = np.full(n_samples, sample_weight,
dtype=sample_weight.dtype)
Expand Down Expand Up @@ -404,7 +403,7 @@ class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):

Attributes
----------
coef_ : array, shape (n_features, ) or (n_targets, n_features)
coef_ : array of shape (n_features, ) or (n_targets, n_features)
Estimated coefficients for the linear regression problem.
If multiple targets are passed during the fit (y 2D), this
is a 2D array of shape (n_targets, n_features), while if only
Expand All @@ -413,10 +412,10 @@ class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
rank_ : int
Rank of matrix `X`. Only available when `X` is dense.

singular_ : array, shape (min(X, y),)
singular_ : array of shape (min(X, y),)
Singular values of `X`. Only available when `X` is dense.

intercept_ : float | array, shape = (n_targets,)
intercept_ : float or array of shape (n_targets,)
Independent term in the linear model. Set to 0.0 if
`fit_intercept = False`.

Expand Down Expand Up @@ -457,13 +456,13 @@ def fit(self, X, y, sample_weight=None):

Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data

y : array_like, shape (n_samples, n_targets)
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary

sample_weight : numpy array of shape [n_samples]
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample

.. versionadded:: 0.17
Expand All @@ -478,8 +477,9 @@ def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
y_numeric=True, multi_output=True)

if sample_weight is not None and np.asarray(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)

X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, fit_intercept=self.fit_intercept, normalize=self.normalize,
Expand Down
8 changes: 7 additions & 1 deletion sklearn/linear_model/_bayes.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
from ..utils.extmath import fast_logdet
from ..utils import check_X_y
from ..utils.fixes import pinvh
from ..utils.validation import _check_sample_weight


###############################################################################
Expand Down Expand Up @@ -169,7 +170,7 @@ def fit(self, X, y, sample_weight=None):

Parameters
----------
X : ndarray of shape (n_samples,n_features)
X : ndarray of shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary
Expand All @@ -190,6 +191,11 @@ def fit(self, X, y, sample_weight=None):
' Got {!r}.'.format(self.n_iter))

X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)

if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)

X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
Expand Down
3 changes: 2 additions & 1 deletion sklearn/linear_model/_ransac.py
Original file line number Diff line number Diff line change
Expand Up @@ -320,7 +320,8 @@ def fit(self, X, y, sample_weight=None):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
sample_weight = _check_sample_weight(sample_weight, X)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)

n_inliers_best = 1
score_best = -np.inf
Expand Down
Loading