From 1677a64b649a2502ccf91b81acdb4bfaf4593263 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 1 Mar 2019 13:43:10 +0100 Subject: [PATCH 001/103] handle sparse x and intercept in _RidgeGCV with gcv_mode='eigen' --- sklearn/linear_model/ridge.py | 56 +++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index eed636622dcdc..7a439865f2dad 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -31,6 +31,7 @@ from ..model_selection import GridSearchCV from ..metrics.scorer import check_scoring from ..exceptions import ConvergenceWarning +from ..utils.sparsefuncs import mean_variance_axis def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0): @@ -1106,6 +1107,61 @@ def identity_estimator(): return self +def _centered_gram(X): + if sparse.issparse(X): + X_m, _ = mean_variance_axis(X, axis=0) + else: + X_m = X.mean(axis=0) + X_mX = safe_sparse_dot(X_m, X.T, dense_output=True) + return (safe_sparse_dot(X, X.T, dense_output=True) + np.dot(X_m, X_m) + - X_mX - X_mX[:, None], X_m) + + +class _WIPNewRidgeGCV(_RidgeGCV): + + def _pre_compute(self, X, y, centered_kernel): + if sparse.issparse(X) and self.fit_intercept: + return self._pre_compute_gram(X, y) + return super()._pre_compute(X, y, centered_kernel) + + def _set_intercept(self, X_offset, y_offset, X_scale): + if getattr(self, '_X_offset', None) is not None: + X_offset = self._X_offset + super()._set_intercept(X_offset, y_offset, X_scale) + + def _pre_compute_gram(self, X, y): + K, X_m = _centered_gram(X) + K += np.ones_like(K) + v, Q = linalg.eigh(K) + QT_y = np.dot(Q.T, y) + self._X_offset = X_m + return v, Q, QT_y + + def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): + w = 1. / (v + alpha) + constant_column = np.var(Q, 0) < 1.e-12 + if not constant_column.any(): + warnings.warn('intercept was not a singular vector of gram matrix') + w[constant_column] = 0 + + c = np.dot(Q, self._diag_dot(w, QT_y)) + G_diag = self._decomp_diag(w, Q) + # handle case where y is 2-d + if len(y.shape) != 1: + G_diag = G_diag[:, np.newaxis] + return G_diag, c + + def fit(self, X, y, sample_weight=None): + if (sparse.issparse(X) and (X.shape[0] > X.shape[1]) + and self.fit_intercept and self.gcv_mode != 'eigen'): + warnings.warn( + 'Cannot use an SVD of X if X is sparse ' + 'and fit_intercept is true. 
will therefore set ' + 'gcv_mode to "eigen", which can cause performance issues ' + 'if n_samples is much larger than n_features') + super().fit(X, y, sample_weight=sample_weight) + + class _BaseRidgeCV(LinearModel, MultiOutputMixin): def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, From 0c36259b8d37a8c4165aec9d810ec44cc3492b5b Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 8 Mar 2019 17:44:19 +0100 Subject: [PATCH 002/103] start svd --- sklearn/linear_model/ridge.py | 51 ++++++++++++++++++++++++++++++----- 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 7a439865f2dad..4ed1ab03ffb56 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1117,6 +1117,16 @@ def _centered_gram(X): - X_mX - X_mX[:, None], X_m) +def _centered_covariance(X): + n = X.shape[0] + if sparse.issparse(X): + X_m, _ = mean_variance_axis(X, axis=0) + else: + X_m = X.mean(axis=0) + return safe_sparse_dot( + X.T, X, dense_output=True) - n * np.outer(X_m, X_m), X_m + + class _WIPNewRidgeGCV(_RidgeGCV): def _pre_compute(self, X, y, centered_kernel): @@ -1151,14 +1161,41 @@ def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c + def _pre_compute_svd(self, X, y, centered_kernel): + if sparse.issparse(X): + return self._pre_compute_covariance(X, y) + return super()._pre_compute_svd(X, y, centered_kernel) + + def _pre_compute_covariance(self, X, y): + n, p = X.shape + cov = np.empty((p + 1, p + 1)) + cov[:-1, :-1], X_m = _centered_covariance(X) + cov[-1] = 0 + cov[:, -1] = 0 + cov[-1, -1] = n + s, V = linalg.eigh(cov) + self._X_offset = X_m + return s, V, X + + def _errors_and_values_svd_helper(self, alpha, y, s, V, X): + n, p = X.shape + w = 1 / (s + alpha) + Xx = np.ones((n, p + 1)) + Xx[:, :-1] = X.A - X.A.mean(axis=0) + X = Xx + hat = np.linalg.multi_dot((X, V * w, V.T, X.T)) + hat_y = hat.dot(y) + hat_diag = np.diag(hat) + return (1 - hat_diag) / alpha, (y - hat_y) / alpha + def fit(self, X, y, sample_weight=None): - if (sparse.issparse(X) and (X.shape[0] > X.shape[1]) - and self.fit_intercept and self.gcv_mode != 'eigen'): - warnings.warn( - 'Cannot use an SVD of X if X is sparse ' - 'and fit_intercept is true. will therefore set ' - 'gcv_mode to "eigen", which can cause performance issues ' - 'if n_samples is much larger than n_features') + # if (sparse.issparse(X) and (X.shape[0] > X.shape[1]) + # and self.fit_intercept and self.gcv_mode != 'eigen'): + # warnings.warn( + # 'Cannot use an SVD of X if X is sparse ' + # 'and fit_intercept is true. 
will therefore set ' + # 'gcv_mode to "eigen", which can cause performance issues ' + # 'if n_samples is much larger than n_features') super().fit(X, y, sample_weight=sample_weight) From b423ea978503435a619f261f81c0420c0a14428b Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 15 Mar 2019 14:39:06 +0100 Subject: [PATCH 003/103] iter --- sklearn/linear_model/ridge.py | 60 +++++++++++++++++++++++++++++++---- 1 file changed, 53 insertions(+), 7 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 4ed1ab03ffb56..912488d3ca686 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1178,15 +1178,34 @@ def _pre_compute_covariance(self, X, y): return s, V, X def _errors_and_values_svd_helper(self, alpha, y, s, V, X): + print('sparse svd: ', alpha) n, p = X.shape w = 1 / (s + alpha) - Xx = np.ones((n, p + 1)) - Xx[:, :-1] = X.A - X.A.mean(axis=0) - X = Xx - hat = np.linalg.multi_dot((X, V * w, V.T, X.T)) - hat_y = hat.dot(y) - hat_diag = np.diag(hat) - return (1 - hat_diag) / alpha, (y - hat_y) / alpha + A = (V * w).dot(V.T) + Xm = self._X_offset + + def matvec(v): + return safe_sparse_dot( + X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] + + def matmat(v): + return safe_sparse_dot( + X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] + + def rmatvec(v): + res = np.empty(X.shape[1] + 1) + res[:-1] = safe_sparse_dot( + X.T, v, dense_output=True) - Xm * v.sum(axis=0) + res[-1] = v.sum(axis=0) + return res + + Xop = sparse.linalg.LinearOperator( + matvec=matvec, matmat=matmat, rmatvec=rmatvec, + shape=(X.shape[0], X.shape[1] + 1)) + AXy = A.dot(Xop.rmatvec(y)) + y_pred = Xop.dot(AXy) + hat_diag = _sparse_multidot_diag(X, A, Xm) + return (1 - hat_diag) / alpha, (y - y_pred) / alpha def fit(self, X, y, sample_weight=None): # if (sparse.issparse(X) and (X.shape[0] > X.shape[1]) @@ -1197,6 +1216,33 @@ def fit(self, X, y, sample_weight=None): # 'gcv_mode to "eigen", which can cause performance issues ' # 'if n_samples is much larger than n_features') super().fit(X, y, sample_weight=sample_weight) + return self + + +def _sparse_multidot_diag(X, A, Xm): + batch_size = X.shape[1] + diag = np.empty(X.shape[0]) + for start in range(0, X.shape[0], batch_size): + batch = slice(start, start + batch_size, 1) + X_batch = np.ones((X[batch].shape[0], X.shape[1] + 1)) + X_batch[:, :-1] = X[batch].A - Xm + diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) + return diag + + + +# def _sparse_multidot_diag(A, B, C): +# batch_size = A.shape[1] +# diag = np.empty(A.shape[0]) +# for start in range(0, A.shape[0], batch_size): +# batch = slice(start, start + batch_size, 1) +# C_batch = C[:, batch] +# if sparse.issparse(C_batch): +# C_batch = C_batch.A +# diag[batch] = ( +# safe_sparse_dot(A[batch], B, dense_output=True) +# * C_batch.T).sum(axis=1) +# return diag class _BaseRidgeCV(LinearModel, MultiOutputMixin): From f63516ef74b3dea6b70f74c2c02834c69a7de793 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 15 Mar 2019 15:05:50 +0100 Subject: [PATCH 004/103] iter --- sklearn/linear_model/ridge.py | 1 - 1 file changed, 1 deletion(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 912488d3ca686..a9f76aa189281 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1178,7 +1178,6 @@ def _pre_compute_covariance(self, X, y): return s, V, X def _errors_and_values_svd_helper(self, alpha, y, s, V, X): - print('sparse svd: ', alpha) n, p = X.shape w = 1 / (s + alpha) A = (V * 
w).dot(V.T) From 00e6f883822463943ceff8401e78233ac5136065 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 15 Mar 2019 15:12:19 +0100 Subject: [PATCH 005/103] remove warning --- sklearn/linear_model/ridge.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index a9f76aa189281..6b7c8d4d863d6 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1206,17 +1206,6 @@ def rmatvec(v): hat_diag = _sparse_multidot_diag(X, A, Xm) return (1 - hat_diag) / alpha, (y - y_pred) / alpha - def fit(self, X, y, sample_weight=None): - # if (sparse.issparse(X) and (X.shape[0] > X.shape[1]) - # and self.fit_intercept and self.gcv_mode != 'eigen'): - # warnings.warn( - # 'Cannot use an SVD of X if X is sparse ' - # 'and fit_intercept is true. will therefore set ' - # 'gcv_mode to "eigen", which can cause performance issues ' - # 'if n_samples is much larger than n_features') - super().fit(X, y, sample_weight=sample_weight) - return self - def _sparse_multidot_diag(X, A, Xm): batch_size = X.shape[1] From ba8bf8a036be8d4c75f4c6141e56ba477ee4cce1 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 15 Mar 2019 15:24:47 +0100 Subject: [PATCH 006/103] iter --- sklearn/linear_model/ridge.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 6b7c8d4d863d6..8a45eb31f74b6 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1141,7 +1141,7 @@ def _set_intercept(self, X_offset, y_offset, X_scale): def _pre_compute_gram(self, X, y): K, X_m = _centered_gram(X) - K += np.ones_like(K) + K += 1. v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) self._X_offset = X_m @@ -1178,6 +1178,8 @@ def _pre_compute_covariance(self, X, y): return s, V, X def _errors_and_values_svd_helper(self, alpha, y, s, V, X): + if not sparse.issparse(X): + return super()._errors_and_values_svd_helper(alpha, y, s, V, X) n, p = X.shape w = 1 / (s + alpha) A = (V * w).dot(V.T) From 1ef72e7be5391edeeedc5f4a6800e0dad1c50ae3 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 18 Mar 2019 16:39:52 +0100 Subject: [PATCH 007/103] details --- sklearn/linear_model/ridge.py | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 8a45eb31f74b6..27301013c1c8c 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1129,16 +1129,16 @@ def _centered_covariance(X): class _WIPNewRidgeGCV(_RidgeGCV): - def _pre_compute(self, X, y, centered_kernel): - if sparse.issparse(X) and self.fit_intercept: - return self._pre_compute_gram(X, y) - return super()._pre_compute(X, y, centered_kernel) - def _set_intercept(self, X_offset, y_offset, X_scale): if getattr(self, '_X_offset', None) is not None: X_offset = self._X_offset super()._set_intercept(X_offset, y_offset, X_scale) + def _pre_compute(self, X, y, centered_kernel): + if sparse.issparse(X) and self.fit_intercept: + return self._pre_compute_gram(X, y) + return super()._pre_compute(X, y, centered_kernel) + def _pre_compute_gram(self, X, y): K, X_m = _centered_gram(X) K += 1. @@ -1147,20 +1147,6 @@ def _pre_compute_gram(self, X, y): self._X_offset = X_m return v, Q, QT_y - def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): - w = 1. 
/ (v + alpha) - constant_column = np.var(Q, 0) < 1.e-12 - if not constant_column.any(): - warnings.warn('intercept was not a singular vector of gram matrix') - w[constant_column] = 0 - - c = np.dot(Q, self._diag_dot(w, QT_y)) - G_diag = self._decomp_diag(w, Q) - # handle case where y is 2-d - if len(y.shape) != 1: - G_diag = G_diag[:, np.newaxis] - return G_diag, c - def _pre_compute_svd(self, X, y, centered_kernel): if sparse.issparse(X): return self._pre_compute_covariance(X, y) @@ -1181,7 +1167,10 @@ def _errors_and_values_svd_helper(self, alpha, y, s, V, X): if not sparse.issparse(X): return super()._errors_and_values_svd_helper(alpha, y, s, V, X) n, p = X.shape + intercept_dim, *_ = np.where(s == n) w = 1 / (s + alpha) + if len(intercept_dim == 1): + w[intercept_dim[0]] = 1 / s[intercept_dim[0]] A = (V * w).dot(V.T) Xm = self._X_offset @@ -1202,11 +1191,12 @@ def rmatvec(v): Xop = sparse.linalg.LinearOperator( matvec=matvec, matmat=matmat, rmatvec=rmatvec, - shape=(X.shape[0], X.shape[1] + 1)) + shape=(X.shape[0], X.shape[1] + 1), dtype=X.dtype) AXy = A.dot(Xop.rmatvec(y)) - y_pred = Xop.dot(AXy) + y_hat = Xop.dot(AXy) hat_diag = _sparse_multidot_diag(X, A, Xm) - return (1 - hat_diag) / alpha, (y - y_pred) / alpha + # return (1 - hat_diag), (y - y_hat) + return (1 - hat_diag) / alpha, (y - y_hat) / alpha def _sparse_multidot_diag(X, A, Xm): From 557261d3282b62087dba95af4bb7cd27fd7b8921 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 18 Mar 2019 17:08:55 +0100 Subject: [PATCH 008/103] only keep first min(n,p) singular values --- sklearn/linear_model/ridge.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 27301013c1c8c..01a3044a174be 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1159,7 +1159,10 @@ def _pre_compute_covariance(self, X, y): cov[-1] = 0 cov[:, -1] = 0 cov[-1, -1] = n + kernel_size = max(0, X.shape[1] - X.shape[0]) s, V = linalg.eigh(cov) + s = s[kernel_size:] + V = V[:, kernel_size:] self._X_offset = X_m return s, V, X From 027e8418c925be158e4cdb4ac5885cdbc2ecd008 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 18 Mar 2019 17:12:56 +0100 Subject: [PATCH 009/103] comment --- sklearn/linear_model/ridge.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 01a3044a174be..b7e757ba6ff37 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1148,6 +1148,7 @@ def _pre_compute_gram(self, X, y): return v, Q, QT_y def _pre_compute_svd(self, X, y, centered_kernel): + # TODO: handle case of fit_intercept=False if sparse.issparse(X): return self._pre_compute_covariance(X, y) return super()._pre_compute_svd(X, y, centered_kernel) From 3bd936a4221ecd22f426368287d1e8478bfefc27 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sun, 7 Apr 2019 18:41:10 +0200 Subject: [PATCH 010/103] remove _WIPRidgeCV class --- sklearn/linear_model/ridge.py | 320 ++++++++++++----------- sklearn/linear_model/tests/test_ridge.py | 15 +- 2 files changed, 175 insertions(+), 160 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index b7e757ba6ff37..07e32a423f92e 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -858,6 +858,46 @@ def classes_(self): return self._label_binarizer.classes_ +def _centered_gram(X, center=True): + if not center: + X_m = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X, X.T, 
dense_output=True), X_m + if sparse.issparse(X): + X_m, _ = mean_variance_axis(X, axis=0) + else: + X_m = X.mean(axis=0) + X_mX = safe_sparse_dot(X_m, X.T, dense_output=True) + return (safe_sparse_dot(X, X.T, dense_output=True) + np.dot(X_m, X_m) + - X_mX - X_mX[:, None], X_m) + + +def _centered_covariance(X, center=True): + if not center: + X_m = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X.T, X, dense_output=True), X_m + n = X.shape[0] + if sparse.issparse(X): + X_m, _ = mean_variance_axis(X, axis=0) + else: + X_m = X.mean(axis=0) + return safe_sparse_dot( + X.T, X, dense_output=True) - n * np.outer(X_m, X_m), X_m + + +def _sparse_multidot_diag(X, A, Xm, with_intercept=True): + batch_size = X.shape[1] + diag = np.empty(X.shape[0]) + for start in range(0, X.shape[0], batch_size): + batch = slice(start, start + batch_size, 1) + X_batch = np.ones((X[batch].shape[0], X.shape[1] + with_intercept)) + if with_intercept: + X_batch[:, :-1] = X[batch].A - Xm + else: + X_batch = X[batch].A + diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) + return diag + + class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation @@ -909,18 +949,6 @@ def __init__(self, alphas=(0.1, 1.0, 10.0), self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values - def _pre_compute(self, X, y, centered_kernel=True): - # even if X is very sparse, K is usually very dense - K = safe_sparse_dot(X, X.T, dense_output=True) - # the following emulates an additional constant regressor - # corresponding to fit_intercept=True - # but this is done only when the features have been centered - if centered_kernel: - K += np.ones_like(K) - v, Q = linalg.eigh(K) - QT_y = np.dot(Q.T, y) - return v, Q, QT_y - def _decomp_diag(self, v_prime, Q): # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T)) return (v_prime * Q ** 2).sum(axis=-1) @@ -932,6 +960,20 @@ def _diag_dot(self, D, B): D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B + def _set_intercept(self, X_offset, y_offset, X_scale): + if getattr(self, '_X_offset', None) is not None: + X_offset = X_offset + self._X_offset + super()._set_intercept(X_offset, y_offset, X_scale) + + def _pre_compute(self, X, y): + K, X_m = _centered_gram(X, self.fit_intercept) + if self.fit_intercept: + K += 1. + v, Q = linalg.eigh(K) + QT_y = np.dot(Q.T, y) + self._X_offset = X_m + return v, Q, QT_y + def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): """Helper function to avoid code duplication between self._errors and self._values. @@ -941,9 +983,10 @@ def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): We don't construct matrix G, instead compute action on y & diagonal. """ w = 1. 
/ (v + alpha) - constant_column = np.var(Q, 0) < 1.e-12 - # detect constant columns - w[constant_column] = 0 # cancel the regularization for the intercept + if self.fit_intercept: + constant_column = np.var(Q, 0) < 1.e-12 + # detect constant columns + w[constant_column] = 0 # cancel regularization for the intercept c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) @@ -960,10 +1003,87 @@ def _values(self, alpha, y, v, Q, QT_y): G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y) return y - (c / G_diag), c - def _pre_compute_svd(self, X, y, centered_kernel=True): - if sparse.issparse(X): - raise TypeError("SVD not supported for sparse matrices") - if centered_kernel: + def _pre_compute_svd_sparse(self, X, y): + n, p = X.shape + cov = np.empty((p + 1, p + 1)) + cov[:-1, :-1], X_m = _centered_covariance(X, self.fit_intercept) + if not self.fit_intercept: + cov = cov[:-1, :-1] + else: + cov[-1] = 0 + cov[:, -1] = 0 + cov[-1, -1] = n + kernel_size = max(0, X.shape[1] - X.shape[0]) + s, V = linalg.eigh(cov) + s = s[kernel_size:] + V = V[:, kernel_size:] + self._X_offset = X_m + return s, V, X + + def _errors_and_values_svd_helper_sparse_no_intercept( + self, alpha, y, s, V, X): + n, p = X.shape + w = 1 / (s + alpha) + A = (V * w).dot(V.T) + Xm = self._X_offset + AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) + y_hat = safe_sparse_dot(X, AXy, dense_output=True) + hat_diag = _sparse_multidot_diag(X, A, Xm, False) + # return (1 - hat_diag), (y - y_hat) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _errors_and_values_svd_helper_sparse_intercept(self, alpha, y, s, V, X): + """Helper function to avoid code duplication between self._errors_svd + and self._values_svd. 
+ """ + n, p = X.shape + intercept_dim, *_ = np.where(s == n) + w = 1 / (s + alpha) + if len(intercept_dim) == 1: + w[intercept_dim[0]] = 1 / s[intercept_dim[0]] + A = (V * w).dot(V.T) + Xm = self._X_offset + + def matvec(v): + return safe_sparse_dot( + X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] + + def matmat(v): + return safe_sparse_dot( + X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] + + def rmatvec(v): + v = v.ravel() + res = np.empty(X.shape[1] + 1) + res[:-1] = safe_sparse_dot( + X.T, v, dense_output=True) - Xm * v.sum(axis=0) + res[-1] = v.sum(axis=0) + return res + + Xop = sparse.linalg.LinearOperator( + matvec=matvec, matmat=matmat, rmatvec=rmatvec, + shape=(X.shape[0], X.shape[1] + 1), dtype=X.dtype) + AXy = A.dot(Xop.adjoint().dot(y)) + y_hat = Xop.dot(AXy) + hat_diag = _sparse_multidot_diag(X, A, Xm) + # return (1 - hat_diag), (y - y_hat) + if len(y.shape) != 1: + # handle case where y is 2-d + hat_diag = hat_diag[:, np.newaxis] + return (1 - hat_diag) / alpha, (y - y_hat) / alpha + + def _errors_and_values_svd_helper_sparse(self, alpha, y, s, V, X): + if self.fit_intercept: + return self._errors_and_values_svd_helper_sparse_intercept( + alpha, y, s, V, X) + return self._errors_and_values_svd_helper_sparse_no_intercept( + alpha, y, s, V, X) + + def _pre_compute_svd_dense(self, X, y): + if self.fit_intercept: X = np.hstack((X, np.ones((X.shape[0], 1)))) # to emulate fit_intercept=True situation, add a column on ones # Note that by centering, the other columns are orthogonal to that one @@ -972,7 +1092,7 @@ def _pre_compute_svd(self, X, y, centered_kernel=True): UT_y = np.dot(U.T, y) return v, U, UT_y - def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y): + def _errors_and_values_svd_helper_dense(self, alpha, y, v, U, UT_y): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. 
""" @@ -988,12 +1108,25 @@ def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_svd(self, alpha, y, v, U, UT_y): - G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y) + + def _errors_svd_sparse(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_svd_helper_sparse( + alpha, y, v, U, UT_y) + return (c / G_diag) ** 2, c + + def _values_svd_sparse(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_svd_helper_sparse( + alpha, y, v, U, UT_y) + return y - (c / G_diag), c + + def _errors_svd_dense(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_svd_helper_dense( + alpha, y, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_svd(self, alpha, y, v, U, UT_y): - G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y) + def _values_svd_dense(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_svd_helper_dense( + alpha, y, v, U, UT_y) return y - (c / G_diag), c def fit(self, X, y, sample_weight=None): @@ -1028,7 +1161,7 @@ def fit(self, X, y, sample_weight=None): with_sw = len(np.shape(sample_weight)) if gcv_mode is None or gcv_mode == 'auto': - if sparse.issparse(X) or n_features > n_samples or with_sw: + if n_features > n_samples: gcv_mode = 'eigen' else: gcv_mode = 'svd' @@ -1044,18 +1177,21 @@ def fit(self, X, y, sample_weight=None): _values = self._values elif gcv_mode == 'svd': # assert n_samples >= n_features - _pre_compute = self._pre_compute_svd - _errors = self._errors_svd - _values = self._values_svd + if sparse.issparse(X): + _pre_compute = self._pre_compute_svd_sparse + _errors = self._errors_svd_sparse + _values = self._values_svd_sparse + else: + _pre_compute = self._pre_compute_svd_dense + _errors = self._errors_svd_dense + _values = self._values_svd_dense else: raise ValueError('bad gcv_mode "%s"' % gcv_mode) if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) - centered_kernel = not sparse.issparse(X) and self.fit_intercept - - v, Q, QT_y = _pre_compute(X, y, centered_kernel) + v, Q, QT_y = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] @@ -1107,128 +1243,6 @@ def identity_estimator(): return self -def _centered_gram(X): - if sparse.issparse(X): - X_m, _ = mean_variance_axis(X, axis=0) - else: - X_m = X.mean(axis=0) - X_mX = safe_sparse_dot(X_m, X.T, dense_output=True) - return (safe_sparse_dot(X, X.T, dense_output=True) + np.dot(X_m, X_m) - - X_mX - X_mX[:, None], X_m) - - -def _centered_covariance(X): - n = X.shape[0] - if sparse.issparse(X): - X_m, _ = mean_variance_axis(X, axis=0) - else: - X_m = X.mean(axis=0) - return safe_sparse_dot( - X.T, X, dense_output=True) - n * np.outer(X_m, X_m), X_m - - -class _WIPNewRidgeGCV(_RidgeGCV): - - def _set_intercept(self, X_offset, y_offset, X_scale): - if getattr(self, '_X_offset', None) is not None: - X_offset = self._X_offset - super()._set_intercept(X_offset, y_offset, X_scale) - - def _pre_compute(self, X, y, centered_kernel): - if sparse.issparse(X) and self.fit_intercept: - return self._pre_compute_gram(X, y) - return super()._pre_compute(X, y, centered_kernel) - - def _pre_compute_gram(self, X, y): - K, X_m = _centered_gram(X) - K += 1. 
- v, Q = linalg.eigh(K) - QT_y = np.dot(Q.T, y) - self._X_offset = X_m - return v, Q, QT_y - - def _pre_compute_svd(self, X, y, centered_kernel): - # TODO: handle case of fit_intercept=False - if sparse.issparse(X): - return self._pre_compute_covariance(X, y) - return super()._pre_compute_svd(X, y, centered_kernel) - - def _pre_compute_covariance(self, X, y): - n, p = X.shape - cov = np.empty((p + 1, p + 1)) - cov[:-1, :-1], X_m = _centered_covariance(X) - cov[-1] = 0 - cov[:, -1] = 0 - cov[-1, -1] = n - kernel_size = max(0, X.shape[1] - X.shape[0]) - s, V = linalg.eigh(cov) - s = s[kernel_size:] - V = V[:, kernel_size:] - self._X_offset = X_m - return s, V, X - - def _errors_and_values_svd_helper(self, alpha, y, s, V, X): - if not sparse.issparse(X): - return super()._errors_and_values_svd_helper(alpha, y, s, V, X) - n, p = X.shape - intercept_dim, *_ = np.where(s == n) - w = 1 / (s + alpha) - if len(intercept_dim == 1): - w[intercept_dim[0]] = 1 / s[intercept_dim[0]] - A = (V * w).dot(V.T) - Xm = self._X_offset - - def matvec(v): - return safe_sparse_dot( - X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] - - def matmat(v): - return safe_sparse_dot( - X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] - - def rmatvec(v): - res = np.empty(X.shape[1] + 1) - res[:-1] = safe_sparse_dot( - X.T, v, dense_output=True) - Xm * v.sum(axis=0) - res[-1] = v.sum(axis=0) - return res - - Xop = sparse.linalg.LinearOperator( - matvec=matvec, matmat=matmat, rmatvec=rmatvec, - shape=(X.shape[0], X.shape[1] + 1), dtype=X.dtype) - AXy = A.dot(Xop.rmatvec(y)) - y_hat = Xop.dot(AXy) - hat_diag = _sparse_multidot_diag(X, A, Xm) - # return (1 - hat_diag), (y - y_hat) - return (1 - hat_diag) / alpha, (y - y_hat) / alpha - - -def _sparse_multidot_diag(X, A, Xm): - batch_size = X.shape[1] - diag = np.empty(X.shape[0]) - for start in range(0, X.shape[0], batch_size): - batch = slice(start, start + batch_size, 1) - X_batch = np.ones((X[batch].shape[0], X.shape[1] + 1)) - X_batch[:, :-1] = X[batch].A - Xm - diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) - return diag - - - -# def _sparse_multidot_diag(A, B, C): -# batch_size = A.shape[1] -# diag = np.empty(A.shape[0]) -# for start in range(0, A.shape[0], batch_size): -# batch = slice(start, start + batch_size, 1) -# C_batch = C[:, batch] -# if sparse.issparse(C_batch): -# C_batch = C_batch.A -# diag[batch] = ( -# safe_sparse_dot(A[batch], B, dense_output=True) -# * C_batch.T).sum(axis=1) -# return diag - - class _BaseRidgeCV(LinearModel, MultiOutputMixin): def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index eca4a53f4f507..a374e544ef4c7 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -327,7 +327,7 @@ def _test_ridge_loo(filter_): # because fit_intercept is applied # generalized cross-validation (efficient leave-one-out) - decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes, fit_intercept) + decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes) errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp) values, c = ridge_gcv._values(1.0, y_diabetes, *decomp) @@ -350,9 +350,9 @@ def _test_ridge_loo(filter_): # generalized cross-validation (efficient leave-one-out, # SVD variation) - decomp = ridge_gcv._pre_compute_svd(X_diabetes_, y_diabetes, fit_intercept) - errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp) - values3, c = 
ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp) + decomp = ridge_gcv._pre_compute_svd_dense(X_diabetes_, y_diabetes) + errors3, c = ridge_gcv._errors_svd_dense(ridge.alpha, y_diabetes, *decomp) + values3, c = ridge_gcv._values_svd_dense(ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results assert_almost_equal(errors, errors3) @@ -862,14 +862,15 @@ def test_errors_and_values_svd_helper(): v = rng.randn(p) U = rng.randn(n, p) UT_y = U.T.dot(y) - G_diag, c = ridgecv._errors_and_values_svd_helper(alpha, y, v, U, UT_y) + G_diag, c = ridgecv._errors_and_values_svd_helper_dense( + alpha, y, v, U, UT_y) # test that helper function behaves as expected - out, c_ = ridgecv._errors_svd(alpha, y, v, U, UT_y) + out, c_ = ridgecv._errors_svd_dense(alpha, y, v, U, UT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) - out, c_ = ridgecv._values_svd(alpha, y, v, U, UT_y) + out, c_ = ridgecv._values_svd_dense(alpha, y, v, U, UT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) From ea3971ceb5afe5e48daf6c6ad7e94788954db0b9 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sun, 7 Apr 2019 18:48:43 +0200 Subject: [PATCH 011/103] pep8 --- sklearn/linear_model/ridge.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 07e32a423f92e..2b1bf4ec28886 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1035,7 +1035,8 @@ def _errors_and_values_svd_helper_sparse_no_intercept( hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_svd_helper_sparse_intercept(self, alpha, y, s, V, X): + def _errors_and_values_svd_helper_sparse_intercept( + self, alpha, y, s, V, X): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. 
""" @@ -1108,7 +1109,6 @@ def _errors_and_values_svd_helper_dense(self, alpha, y, v, U, UT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_svd_sparse(self, alpha, y, v, U, UT_y): G_diag, c = self._errors_and_values_svd_helper_sparse( alpha, y, v, U, UT_y) From 074d139be90450bc36d181ffbd4a1850400353d0 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 10 Apr 2019 18:38:03 +0200 Subject: [PATCH 012/103] _check_gcv_mode util --- sklearn/linear_model/ridge.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 2b1bf4ec28886..d3c7f68d1cedb 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -898,6 +898,19 @@ def _sparse_multidot_diag(X, A, Xm, with_intercept=True): return diag +def _check_gcv_mode(X, sample_weights): + sparse_x = sparse.issparse(X) + with_sample_weights = np.ndim(sample_weights) > 0 + if with_sample_weights and sparse_x: + warnings.warn( + 'generalized cross-validation with sparse X and sample weights' + 'not supported yet') + return None + if X.shape[0] > X.shape[1]: + return 'svd' + return 'eigen' + + class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation @@ -1158,18 +1171,10 @@ def fit(self, X, y, sample_weight=None): sample_weight=sample_weight) gcv_mode = self.gcv_mode - with_sw = len(np.shape(sample_weight)) - + best_gcv_mode = _check_gcv_mode(X, sample_weight) + assert (best_gcv_mode is not None) if gcv_mode is None or gcv_mode == 'auto': - if n_features > n_samples: - gcv_mode = 'eigen' - else: - gcv_mode = 'svd' - elif gcv_mode == "svd" and with_sw: - # FIXME non-uniform sample weights not yet supported - warnings.warn("non-uniform sample weights unsupported for svd, " - "forcing usage of eigen") - gcv_mode = 'eigen' + gcv_mode = best_gcv_mode if gcv_mode == 'eigen': _pre_compute = self._pre_compute From fbdda415768d860678b68e81b4f5286c0719fa36 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sat, 13 Apr 2019 02:44:07 +0200 Subject: [PATCH 013/103] fall back to 10-fold when sample weights, gcv and sparse design --- sklearn/linear_model/ridge.py | 18 ++++++++++++------ sklearn/linear_model/tests/test_ridge.py | 7 ++++--- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index d3c7f68d1cedb..9e04386a2ccc1 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -889,7 +889,8 @@ def _sparse_multidot_diag(X, A, Xm, with_intercept=True): diag = np.empty(X.shape[0]) for start in range(0, X.shape[0], batch_size): batch = slice(start, start + batch_size, 1) - X_batch = np.ones((X[batch].shape[0], X.shape[1] + with_intercept)) + X_batch = np.ones( + (X[batch].shape[0], X.shape[1] + with_intercept), dtype=X.dtype) if with_intercept: X_batch[:, :-1] = X[batch].A - Xm else: @@ -904,7 +905,7 @@ def _check_gcv_mode(X, sample_weights): if with_sample_weights and sparse_x: warnings.warn( 'generalized cross-validation with sparse X and sample weights' - 'not supported yet') + ' not supported yet') return None if X.shape[0] > X.shape[1]: return 'svd' @@ -1196,7 +1197,7 @@ def fit(self, X, y, sample_weight=None): if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) - v, Q, QT_y = _pre_compute(X, y) + precomputed = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] @@ -1211,9 +1212,9 @@ def 
fit(self, X, y, sample_weight=None): for i, alpha in enumerate(self.alphas): if error: - out, c = _errors(float(alpha), y, v, Q, QT_y) + out, c = _errors(float(alpha), y, *precomputed) else: - out, c = _values(float(alpha), y, v, Q, QT_y) + out, c = _values(float(alpha), y, *precomputed) cv_values[:, i] = out.ravel() C.append(c) @@ -1279,7 +1280,12 @@ def fit(self, X, y, sample_weight=None): ------- self : object """ - if self.cv is None: + cv = self.cv + if self.cv is None and sparse.issparse(X) and np.ndim(sample_weight): + warnings.warn('sample weights with sparse X and gcv not supported, ' + 'falling back to 10-fold cross-validation') + cv = 10 + if cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index a374e544ef4c7..997831c2d4acd 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -384,9 +384,10 @@ def _test_ridge_loo(filter_): assert_equal(ridge_gcv4.alpha_, alpha_) # check that we get same best alpha with sample weights - ridge_gcv.fit(filter_(X_diabetes), y_diabetes, - sample_weight=np.ones(n_samples)) - assert_equal(ridge_gcv.alpha_, alpha_) + if filter_ == DENSE_FILTER: + ridge_gcv.fit(filter_(X_diabetes), y_diabetes, + sample_weight=np.ones(n_samples)) + assert_equal(ridge_gcv.alpha_, alpha_) # simulate several responses Y = np.vstack((y_diabetes, y_diabetes)).T From a2ffa1cf8abad9bf3fc27a744be58510c34a11e3 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 18:23:07 +0200 Subject: [PATCH 014/103] add test to compare results of GCV with gridsearch leave-one-out --- sklearn/linear_model/tests/test_ridge.py | 26 ++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 997831c2d4acd..29f97d0925711 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -310,6 +310,32 @@ def test_ridge_individual_penalties(): assert_raises(ValueError, ridge.fit, X, y) +def test_ridge_gcv_vs_k_fold(): + alphas = [1e-3, .1, 1., 10., 1e3] + shapes = [(71, 52), (71, 83)] + for (n_samples, n_features), fit_intercept in product( + shapes, [True, False]): + x, y = make_regression( + n_samples=n_samples, n_features=n_features, n_targets=3, + random_state=0, shuffle=False, noise=30.) 
+ x += 30 * np.random.RandomState(0).randn(x.shape[1]) + x_s = sp.csr_matrix(x) + loo = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, + alphas=alphas, scoring='neg_mean_squared_error') + loo.fit(x, y) + for gcv_mode, sparse_x in product(['svd', 'eigen'], [True, False]): + print('{}, {}, sparse: {}, intercept: {}, alpha: {}'.format( + (n_samples, n_features), gcv_mode, sparse_x, fit_intercept, + loo.alpha_)) + xx = x_s if sparse_x else x + gcv = RidgeCV(gcv_mode=gcv_mode, fit_intercept=fit_intercept, + alphas=alphas) + gcv.fit(xx, y) + assert gcv.alpha_ == loo.alpha_ + assert np.allclose(gcv.coef_, loo.coef_, rtol=1e-3) + assert np.allclose(gcv.intercept_, loo.intercept_, rtol=1e-3) + + def _test_ridge_loo(filter_): # test that can work with both dense or sparse matrices n_samples = X_diabetes.shape[0] From 5ae7c753c41ff6ea36ee226fa3eb55a25737f036 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 18:36:10 +0200 Subject: [PATCH 015/103] test with and without normalize --- sklearn/linear_model/ridge.py | 2 +- sklearn/linear_model/tests/test_ridge.py | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 9e04386a2ccc1..fed2e600ca95a 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -976,7 +976,7 @@ def _diag_dot(self, D, B): def _set_intercept(self, X_offset, y_offset, X_scale): if getattr(self, '_X_offset', None) is not None: - X_offset = X_offset + self._X_offset + X_offset = X_offset + self._X_offset * X_scale super()._set_intercept(X_offset, y_offset, X_scale) def _pre_compute(self, X, y): diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 29f97d0925711..f07a04a033b9b 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -313,15 +313,16 @@ def test_ridge_individual_penalties(): def test_ridge_gcv_vs_k_fold(): alphas = [1e-3, .1, 1., 10., 1e3] shapes = [(71, 52), (71, 83)] - for (n_samples, n_features), fit_intercept in product( - shapes, [True, False]): + for (n_samples, n_features), fit_intercept, normalize in product( + shapes, [True, False], [True, False]): x, y = make_regression( n_samples=n_samples, n_features=n_features, n_targets=3, random_state=0, shuffle=False, noise=30.) 
x += 30 * np.random.RandomState(0).randn(x.shape[1]) x_s = sp.csr_matrix(x) loo = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, - alphas=alphas, scoring='neg_mean_squared_error') + alphas=alphas, scoring='neg_mean_squared_error', + normalize=normalize) loo.fit(x, y) for gcv_mode, sparse_x in product(['svd', 'eigen'], [True, False]): print('{}, {}, sparse: {}, intercept: {}, alpha: {}'.format( @@ -329,11 +330,11 @@ def test_ridge_gcv_vs_k_fold(): loo.alpha_)) xx = x_s if sparse_x else x gcv = RidgeCV(gcv_mode=gcv_mode, fit_intercept=fit_intercept, - alphas=alphas) + alphas=alphas, normalize=normalize) gcv.fit(xx, y) assert gcv.alpha_ == loo.alpha_ - assert np.allclose(gcv.coef_, loo.coef_, rtol=1e-3) - assert np.allclose(gcv.intercept_, loo.intercept_, rtol=1e-3) + assert np.allclose(gcv.coef_, loo.coef_, rtol=1e-4) + assert np.allclose(gcv.intercept_, loo.intercept_, rtol=1e-4) def _test_ridge_loo(filter_): From 9facc71bee5afec1b6d562469576e605e886761e Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 19:15:54 +0200 Subject: [PATCH 016/103] add sparse svd to _test_ridge_loo --- sklearn/linear_model/tests/test_ridge.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 64f4cdb180588..2d20f754d2521 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -385,6 +385,17 @@ def _test_ridge_loo(filter_): assert_almost_equal(errors, errors3) assert_almost_equal(values, values3) + # generalized cross-validation (efficient leave-one-out, + # SVD variation) + decomp = ridge_gcv._pre_compute_svd_sparse( + sp.csr_matrix(X_diabetes_), y_diabetes) + errors4, c = ridge_gcv._errors_svd_sparse(ridge.alpha, y_diabetes, *decomp) + values4, c = ridge_gcv._values_svd_sparse(ridge.alpha, y_diabetes, *decomp) + + # check that efficient and SVD efficient LOO give same results + assert_almost_equal(errors, errors4) + assert_almost_equal(values, values4) + # check best alpha ridge_gcv.fit(filter_(X_diabetes), y_diabetes) alpha_ = ridge_gcv.alpha_ @@ -531,7 +542,7 @@ def test_dense_sparse(test_func): def test_ridge_cv_sparse_svd(): X = sp.csr_matrix(X_diabetes) ridge = RidgeCV(gcv_mode="svd") - assert_raises(TypeError, ridge.fit, X) + assert_raises(TypeError, ridge.fit, X, y_diabetes) def test_ridge_sparse_svd(): From 3309421b301bbc94fac2f7148921df2eb6d129d6 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 19:20:10 +0200 Subject: [PATCH 017/103] remove test_ridge_cv_sparse_svd --- sklearn/linear_model/tests/test_ridge.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 2d20f754d2521..76b74a857c33d 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -539,12 +539,6 @@ def test_dense_sparse(test_func): check_dense_sparse(test_func) -def test_ridge_cv_sparse_svd(): - X = sp.csr_matrix(X_diabetes) - ridge = RidgeCV(gcv_mode="svd") - assert_raises(TypeError, ridge.fit, X, y_diabetes) - - def test_ridge_sparse_svd(): X = sp.csc_matrix(rng.rand(100, 10)) y = rng.rand(100) From 9e97a87f7fe016e49f33ca6a99967cf0d4b86bfe Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 19:29:19 +0200 Subject: [PATCH 018/103] smaller test data for ridgecv to speed up test --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 76b74a857c33d..d8e14a13b9c9a 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -312,7 +312,7 @@ def test_ridge_individual_penalties(): def test_ridge_gcv_vs_k_fold(): alphas = [1e-3, .1, 1., 10., 1e3] - shapes = [(71, 52), (71, 83)] + shapes = [(11, 8), (11, 20)] for (n_samples, n_features), fit_intercept, normalize in product( shapes, [True, False], [True, False]): x, y = make_regression( From 543800293d56b0c204f08ef5e8476a3f144af629 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 19:33:07 +0200 Subject: [PATCH 019/103] add uninformative features --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index d8e14a13b9c9a..93e286f961f1d 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -317,7 +317,7 @@ def test_ridge_gcv_vs_k_fold(): shapes, [True, False], [True, False]): x, y = make_regression( n_samples=n_samples, n_features=n_features, n_targets=3, - random_state=0, shuffle=False, noise=30.) + random_state=0, shuffle=False, noise=30., n_informative=5) x += 30 * np.random.RandomState(0).randn(x.shape[1]) x_s = sp.csr_matrix(x) loo = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, From 06555a7253850867021f034f294968fb94eb43fb Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 21:31:16 +0200 Subject: [PATCH 020/103] add tests with sample weights --- sklearn/linear_model/ridge.py | 33 ++++++++++-------- sklearn/linear_model/tests/test_ridge.py | 44 ++++++++++++++++++++++-- 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 933de8a569b40..c2608f83b8410 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -961,10 +961,9 @@ def _check_gcv_mode(X, sample_weights): sparse_x = sparse.issparse(X) with_sample_weights = np.ndim(sample_weights) > 0 if with_sample_weights and sparse_x: - warnings.warn( - 'generalized cross-validation with sparse X and sample weights' - ' not supported yet') - return None + raise ValueError( + 'sample weights not (yet) supported by ' + 'generalized cross-validation when X is sparse') if X.shape[0] > X.shape[1]: return 'svd' return 'eigen' @@ -1231,7 +1230,6 @@ def fit(self, X, y, sample_weight=None): gcv_mode = self.gcv_mode best_gcv_mode = _check_gcv_mode(X, sample_weight) - assert (best_gcv_mode is not None) if gcv_mode is None or gcv_mode == 'auto': gcv_mode = best_gcv_mode @@ -1263,10 +1261,10 @@ def fit(self, X, y, sample_weight=None): scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None - if np.any(self.alphas < 0): - raise ValueError("alphas cannot be negative. " - "Got {} containing some " - "negative value instead.".format(self.alphas)) + if np.any(self.alphas <= 0): + raise ValueError( + "alphas must be positive. 
Got {} containing some " + "negative or null value instead.".format(self.alphas)) for i, alpha in enumerate(self.alphas): if error: @@ -1339,10 +1337,13 @@ def fit(self, X, y, sample_weight=None): self : object """ cv = self.cv - if self.cv is None and sparse.issparse(X) and np.ndim(sample_weight): - warnings.warn('sample weights with sparse X and gcv not supported, ' - 'falling back to 10-fold cross-validation') - cv = 10 + gcv_modes = {None, 'auto', 'svd', 'eigen'} + if self.cv in gcv_modes and sparse.issparse(X) and np.ndim( + sample_weight): + warnings.warn( + 'sample weights with sparse X and gcv not supported, ' + 'falling back to 5-fold cross-validation') + cv = 5 if cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, @@ -1359,9 +1360,11 @@ def fit(self, X, y, sample_weight=None): raise ValueError("cv!=None and store_cv_values=True " " are incompatible") parameters = {'alpha': self.alphas} + solver = 'sparse_cg' if sparse.issparse(X) else 'auto' gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept, - normalize=self.normalize), - parameters, cv=self.cv, scoring=self.scoring) + normalize=self.normalize, + solver=solver), + parameters, cv=cv, scoring=self.scoring) gs.fit(X, y, sample_weight=sample_weight) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 93e286f961f1d..f3a463c184543 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -337,6 +337,44 @@ def test_ridge_gcv_vs_k_fold(): assert np.allclose(gcv.intercept_, loo.intercept_, rtol=1e-4) +def test_ridge_gcv_sample_weights(): + x, y, c = make_regression( + n_samples=23, n_features=7, n_targets=4, coef=True, + random_state=0, shuffle=False, noise=30.) + x += 30 * np.random.RandomState(0).randn(x.shape[1]) + x_s = sp.csr_matrix(x) + sample_weights = 3 * np.random.RandomState(0).randn(len(x)) + sample_weights = np.asarray( + sample_weights - sample_weights.min() + 1, dtype=int) + indices = np.concatenate([n * [i] for (i, n) in enumerate(sample_weights)]) + sample_weights = 1. * sample_weights + tiled_x, tiled_y = x[indices], y[indices] + # alphas = [1e-3, .1, 1., 10., 1e3] + alphas = [1.e-10] + ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=True) + ridge.fit(tiled_x, tiled_y) + for gcv_mode in ['svd', 'eigen']: + gcv = RidgeCV(fit_intercept=True, scoring='neg_mean_squared_error', + alphas=alphas, normalize=True, gcv_mode=gcv_mode) + gcv.fit(x, y, sample_weight=sample_weights) + assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) + assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) + + ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=True, + solver='sparse_cg') + # ridge.fit(sp.csr_matrix(tiled_x), tiled_y) + # TODO: once Ridge is fixed to handle correctly sparse x and sample + # weights, replace with the line above. 
For now we just check ridgecv and + # ridge give the same result + ridge.fit(x_s, y, sample_weight=sample_weights) + for gcv_mode in ['svd', 'eigen']: + gcv = RidgeCV(fit_intercept=True, scoring='neg_mean_squared_error', + alphas=alphas, normalize=True, gcv_mode=gcv_mode) + gcv.fit(x_s, y, sample_weight=sample_weights) + assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) + assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) + + def _test_ridge_loo(filter_): # test that can work with both dense or sparse matrices n_samples = X_diabetes.shape[0] @@ -445,7 +483,7 @@ def _test_ridge_cv_normalize(filter_): ridge_cv = RidgeCV(normalize=True, cv=3) ridge_cv.fit(filter_(10. * X_diabetes), y_diabetes) - gs = GridSearchCV(Ridge(normalize=True), cv=3, + gs = GridSearchCV(Ridge(normalize=True, solver='sparse_cg'), cv=3, param_grid={'alpha': ridge_cv.alphas}) gs.fit(filter_(10. * X_diabetes), y_diabetes) assert_equal(gs.best_estimator_.alpha, ridge_cv.alpha_) @@ -794,13 +832,13 @@ def test_ridgecv_negative_alphas(): # Negative integers ridge = RidgeCV(alphas=(-1, -10, -100)) assert_raises_regex(ValueError, - "alphas cannot be negative.", + "alphas must be positive", ridge.fit, X, y) # Negative floats ridge = RidgeCV(alphas=(-0.1, -1.0, -10.0)) assert_raises_regex(ValueError, - "alphas cannot be negative.", + "alphas must be positive", ridge.fit, X, y) From 824231d651899937b250b9a4b44a8bfe07d01cc6 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 21:34:57 +0200 Subject: [PATCH 021/103] pep8 --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index f3a463c184543..7ab07a6733695 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -462,7 +462,7 @@ def _test_ridge_loo(filter_): # check that we get same best alpha with sample weights if filter_ == DENSE_FILTER: ridge_gcv.fit(filter_(X_diabetes), y_diabetes, - sample_weight=np.ones(n_samples)) + sample_weight=np.ones(n_samples)) assert_equal(ridge_gcv.alpha_, alpha_) # simulate several responses From d1688ceb5b750fbf2a2f9cce191c94c8a5dbc239 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 22 Apr 2019 21:39:14 +0200 Subject: [PATCH 022/103] pep8 --- sklearn/linear_model/ridge.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index de66b345962cb..868532c7ae10d 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -418,8 +418,6 @@ def _ridge_regression(X, y, alpha, sample_weight=None, solver='auto', raise ValueError("Number of samples in X and y does not correspond:" " %d != %d" % (n_samples, n_samples_)) - - if has_sw: if np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") @@ -439,7 +437,6 @@ def _ridge_regression(X, y, alpha, sample_weight=None, solver='auto', if alpha.size == 1 and n_targets > 1: alpha = np.repeat(alpha, n_targets) - n_iter = None if solver == 'sparse_cg': coef = _solve_sparse_cg(X, y, alpha, From 4d9c3723b8f6e2b6c49b49f07dd8bbbf583742ef Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 24 Apr 2019 08:48:53 +0200 Subject: [PATCH 023/103] test sample weights with bigger alpha --- sklearn/linear_model/ridge.py | 3 +++ sklearn/linear_model/tests/test_ridge.py | 11 ++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/sklearn/linear_model/ridge.py 
b/sklearn/linear_model/ridge.py index 868532c7ae10d..3c4f52adeb1c9 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1368,6 +1368,9 @@ def fit(self, X, y, sample_weight=None): normalize=self.normalize, solver=solver), parameters, cv=cv, scoring=self.scoring) + # note: unlike when using gcv, sample weights won't be used + # to compute the validation score so selected hyperparameter + # may differ gs.fit(X, y, sample_weight=sample_weight) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index bd5ebdb51a2e5..14cfe4d810053 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -350,13 +350,14 @@ def test_ridge_gcv_sample_weights(): indices = np.concatenate([n * [i] for (i, n) in enumerate(sample_weights)]) sample_weights = 1. * sample_weights tiled_x, tiled_y = x[indices], y[indices] - # alphas = [1e-3, .1, 1., 10., 1e3] - alphas = [1.e-10] - ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=True) + # loo scores won't be the same for expanded X and original X with sample + # weights so there must be only one value in the hyperparameter grid + alphas = [1.] + ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=False) ridge.fit(tiled_x, tiled_y) for gcv_mode in ['svd', 'eigen']: - gcv = RidgeCV(fit_intercept=True, scoring='neg_mean_squared_error', - alphas=alphas, normalize=True, gcv_mode=gcv_mode) + gcv = RidgeCV(fit_intercept=True, alphas=alphas, + normalize=False, gcv_mode=gcv_mode) gcv.fit(x, y, sample_weight=sample_weights) assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) From 5b78c3bd454eb3adbe01f3b4c00bd2a020bb9985 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 24 Apr 2019 09:01:08 +0200 Subject: [PATCH 024/103] clip slice for indexing sparse matrices with scipy 0.17 --- sklearn/linear_model/ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 3c4f52adeb1c9..ba8b532a79415 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -949,7 +949,7 @@ def _sparse_multidot_diag(X, A, Xm, with_intercept=True): batch_size = X.shape[1] diag = np.empty(X.shape[0]) for start in range(0, X.shape[0], batch_size): - batch = slice(start, start + batch_size, 1) + batch = slice(start, min(X.shape[0], start + batch_size), 1) X_batch = np.ones( (X[batch].shape[0], X.shape[1] + with_intercept), dtype=X.dtype) if with_intercept: From 3df75d79b4990bad9404f9d98219faf04163a8ae Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 24 Apr 2019 09:32:49 +0200 Subject: [PATCH 025/103] ignore gridsearch deprecation warning --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 14cfe4d810053..24876424816a9 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -372,7 +372,7 @@ def test_ridge_gcv_sample_weights(): for gcv_mode in ['svd', 'eigen']: gcv = RidgeCV(fit_intercept=True, scoring='neg_mean_squared_error', alphas=alphas, normalize=True, gcv_mode=gcv_mode) - gcv.fit(x_s, y, sample_weight=sample_weights) + ignore_warnings(gcv.fit)(x_s, y, sample_weight=sample_weights) assert np.allclose(gcv.coef_, ridge.coef_, 
rtol=1e-2) assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) From d991a9af8839a65f070f2950b1c079ab7dc35466 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 29 Apr 2019 09:52:43 +0200 Subject: [PATCH 026/103] don't center in _pre_compute when X is dense --- sklearn/linear_model/ridge.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index ba8b532a79415..329a9462fc33e 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1040,7 +1040,9 @@ def _set_intercept(self, X_offset, y_offset, X_scale): super()._set_intercept(X_offset, y_offset, X_scale) def _pre_compute(self, X, y): - K, X_m = _centered_gram(X, self.fit_intercept) + # if X is dense it has already been centered in preprocessing + center = self.fit_intercept and sparse.issparse(X) + K, X_m = _centered_gram(X, center) if self.fit_intercept: K += 1. v, Q = linalg.eigh(K) From d0146b0118fbeaafe1211f4da19e0cb1989bb434 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 29 Apr 2019 10:17:57 +0200 Subject: [PATCH 027/103] update whats_new --- doc/whats_new/v0.21.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/whats_new/v0.21.rst b/doc/whats_new/v0.21.rst index b4d519230ca2d..f1f2d3c107ef0 100644 --- a/doc/whats_new/v0.21.rst +++ b/doc/whats_new/v0.21.rst @@ -35,6 +35,8 @@ random sampling procedures. seed, including :class:`linear_model.LogisticRegression`, :class:`linear_model.LogisticRegressionCV`, :class:`linear_model.Ridge`, and :class:`linear_model.RidgeCV` with 'sag' solver. |Fix| +- :class:`linear_model.ridge.RidgeCV` when using generalized cross-validation + with sparse inputs. |Fix| Details are listed in the changelog below. @@ -385,6 +387,10 @@ Support for Python 3.4 and below has been officially dropped. is provided (previously `cholesky` solver was selected). :issue:`13363` by :user:`Bartosz Telenczuk ` +- |Fix| :class:`linear_model.ridge.RidgeCV` with generalized cross-validation + now correctly fits an intercept when ``fit_intercept=True`` and the design + matrix is sparse. :issue:`13350` by :user:`Jérôme Dockès ` + :mod:`sklearn.manifold` ............................ From 5705cd55739b99240323be7279d05ec03f44d6f7 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 29 Apr 2019 11:48:41 +0200 Subject: [PATCH 028/103] use the square roots of sample weights rather than a column of ones to capture the intercept in ridge generalized cross-val when sample weights are provided --- sklearn/linear_model/ridge.py | 50 ++++++++++++++++++------ sklearn/linear_model/tests/test_ridge.py | 3 ++ 2 files changed, 42 insertions(+), 11 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 329a9462fc33e..8f6a6f089d001 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1044,7 +1044,11 @@ def _pre_compute(self, X, y): center = self.fit_intercept and sparse.issparse(X) K, X_m = _centered_gram(X, center) if self.fit_intercept: - K += 1. + if self._with_sw: + K += np.outer( + self._sqrt_sw, self._sqrt_sw) + else: + K += 1. v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) self._X_offset = X_m @@ -1060,9 +1064,13 @@ def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): """ w = 1. 
/ (v + alpha) if self.fit_intercept: - constant_column = np.var(Q, 0) < 1.e-12 - # detect constant columns - w[constant_column] = 0 # cancel regularization for the intercept + if self._with_sw: + intercept_dim = np.isclose( + Q, self._normalized_sqrt_sw[:, None]).all(axis=0) + else: + intercept_dim = np.var(Q, 0) < 1.e-12 + # detect intercept + w[intercept_dim] = 0 # cancel regularization for the intercept c = np.dot(Q, self._diag_dot(w, QT_y)) G_diag = self._decomp_diag(w, Q) @@ -1088,7 +1096,10 @@ def _pre_compute_svd_sparse(self, X, y): else: cov[-1] = 0 cov[:, -1] = 0 - cov[-1, -1] = n + if self._with_sw: + cov[-1, -1] = self._weight_sum + else: + cov[-1, -1] = n kernel_size = max(0, X.shape[1] - X.shape[0]) s, V = linalg.eigh(cov) s = s[kernel_size:] @@ -1117,7 +1128,10 @@ def _errors_and_values_svd_helper_sparse_intercept( and self._values_svd. """ n, p = X.shape - intercept_dim, *_ = np.where(s == n) + if self._with_sw: + intercept_dim, *_ = np.where(s == self._weight_sum) + else: + intercept_dim, *_ = np.where(s == n) w = 1 / (s + alpha) if len(intercept_dim) == 1: w[intercept_dim[0]] = 1 / s[intercept_dim[0]] @@ -1161,7 +1175,11 @@ def _errors_and_values_svd_helper_sparse(self, alpha, y, s, V, X): def _pre_compute_svd_dense(self, X, y): if self.fit_intercept: - X = np.hstack((X, np.ones((X.shape[0], 1)))) + if self._with_sw: + intercept = self._sqrt_sw[:, None] + else: + intercept = np.ones((X.shape[0], 1)) + X = np.hstack((X, intercept)) # to emulate fit_intercept=True situation, add a column on ones # Note that by centering, the other columns are orthogonal to that one U, s, _ = linalg.svd(X, full_matrices=0) @@ -1173,10 +1191,14 @@ def _errors_and_values_svd_helper_dense(self, alpha, y, v, U, UT_y): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. """ - constant_column = np.var(U, 0) < 1.e-12 - # detect columns colinear to ones + if self._with_sw: + intercept_dim = np.isclose( + U, self._normalized_sqrt_sw[:, None]).all(axis=0) + else: + intercept_dim = np.var(U, 0) < 1.e-12 + # detect intercept column w = ((v + alpha) ** -1) - (alpha ** -1) - w[constant_column] = - (alpha ** -1) + w[intercept_dim] = - (alpha ** -1) # cancel the regularization for the intercept c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) @@ -1257,7 +1279,13 @@ def fit(self, X, y, sample_weight=None): if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) - + self._sqrt_sw = np.sqrt(sample_weight) + self._normalized_sqrt_sw = self._sqrt_sw / np.linalg.norm( + self._sqrt_sw) + self._weight_sum = sample_weight.sum() + self._with_sw = True + else: + self._with_sw = False precomputed = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 24876424816a9..b6f8a8faf03f0 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -389,6 +389,7 @@ def _test_ridge_loo(filter_): else: X_diabetes_ = X_diabetes ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) + ridge_gcv._with_sw = False ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept) # because fit_intercept is applied @@ -961,6 +962,7 @@ def test_ridge_regression_check_arguments_validity(return_intercept, def test_errors_and_values_helper(): ridgecv = _RidgeGCV() + ridgecv._with_sw = False rng = check_random_state(42) alpha = 1. 
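(A minimal standalone sketch of the bookkeeping this test exercises, assuming a dense X, no intercept and no sample weights: the eigendecomposition of the Gram matrix reproduces the dual coefficients and the diagonal of (K + alpha*I)^-1, whose ratio gives the leave-one-out residuals.)

import numpy as np

rng = np.random.RandomState(0)
X, y, alpha = rng.randn(12, 5), rng.randn(12), 1.0
K = X.dot(X.T)
v, Q = np.linalg.eigh(K)
w = 1. / (v + alpha)
c = Q.dot(w * Q.T.dot(y))             # dual coef, i.e. (K + alpha*I)^-1 y
G_diag = (w * Q ** 2).sum(axis=-1)    # diagonal of (K + alpha*I)^-1
G = np.linalg.inv(K + alpha * np.eye(12))
assert np.allclose(c, G.dot(y))
assert np.allclose(G_diag, np.diag(G))
# the leave-one-out residuals used by _RidgeGCV are then c / G_diag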
n = 5 @@ -982,6 +984,7 @@ def test_errors_and_values_helper(): def test_errors_and_values_svd_helper(): ridgecv = _RidgeGCV() + ridgecv._with_sw = False rng = check_random_state(42) alpha = 1. for n, p in zip((5, 10), (12, 6)): From 464e50734b34da524c1626dc60553b0a16d1f3f6 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 29 Apr 2019 11:50:28 +0200 Subject: [PATCH 029/103] compare to singular vector rather than check singular value to find intercept direction --- sklearn/linear_model/ridge.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 8f6a6f089d001..f4e285d6a2c8b 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1128,13 +1128,14 @@ def _errors_and_values_svd_helper_sparse_intercept( and self._values_svd. """ n, p = X.shape + intercept_sv = np.zeros(V.shape[0]) + intercept_sv[-1] = 1 if self._with_sw: - intercept_dim, *_ = np.where(s == self._weight_sum) + intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) else: - intercept_dim, *_ = np.where(s == n) + intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) w = 1 / (s + alpha) - if len(intercept_dim) == 1: - w[intercept_dim[0]] = 1 / s[intercept_dim[0]] + w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) Xm = self._X_offset From c5a35ce9a92e598585a412abc26b17db51b81ca5 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 29 Apr 2019 16:12:09 +0200 Subject: [PATCH 030/103] add some comments + sample weights in sparse primal --- sklearn/linear_model/ridge.py | 76 ++++++++++++++++++++++++++++++----- 1 file changed, 67 insertions(+), 9 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index f4e285d6a2c8b..385c116405d31 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -920,6 +920,15 @@ def classes_(self): def _centered_gram(X, center=True): + """Computes centered Gram matrix. + + Notes + ----- + if center is True, compute + (X - X.mean(axis=0)).dot((X - X.mean(axis=0)).T) + else + X.dot(X.T) + """ if not center: X_m = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X, X.T, dense_output=True), X_m @@ -933,6 +942,15 @@ def _centered_gram(X, center=True): def _centered_covariance(X, center=True): + """Computes centered Gram matrix. 
+ + Notes + ----- + if center is True, compute + (X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)) + else + X.T.dot(X) + """ if not center: X_m = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X.T, X, dense_output=True), X_m @@ -945,7 +963,11 @@ def _centered_covariance(X, center=True): X.T, X, dense_output=True) - n * np.outer(X_m, X_m), X_m -def _sparse_multidot_diag(X, A, Xm, with_intercept=True): +def _sparse_multidot_diag(X, A, Xm, with_intercept=True, intercept_col=None): + """ + compute the diagonal of (X - Xm).dot(A).dot((X - Xm).T) + when X is sparse, without storing X - Xm nor X.dot(A) + """ batch_size = X.shape[1] diag = np.empty(X.shape[0]) for start in range(0, X.shape[0], batch_size): @@ -954,6 +976,9 @@ def _sparse_multidot_diag(X, A, Xm, with_intercept=True): (X[batch].shape[0], X.shape[1] + with_intercept), dtype=X.dtype) if with_intercept: X_batch[:, :-1] = X[batch].A - Xm + if intercept_col is not None: + X_batch[:, -1] = intercept_col[batch] + print(X_batch[:, -1]) else: X_batch = X[batch].A diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) @@ -963,10 +988,14 @@ def _sparse_multidot_diag(X, A, Xm, with_intercept=True): def _check_gcv_mode(X, sample_weights): sparse_x = sparse.issparse(X) with_sample_weights = np.ndim(sample_weights) > 0 + # sample weights not supported with sparse design yet, + # because mean_variance_axis does not support sample weights if with_sample_weights and sparse_x: raise ValueError( 'sample weights not (yet) supported by ' 'generalized cross-validation when X is sparse') + # if X has more rows than columns, use decomposition of X^T.X, + # otherwise X.X^T if X.shape[0] > X.shape[1]: return 'svd' return 'eigen' @@ -1035,6 +1064,7 @@ def _diag_dot(self, D, B): return D * B def _set_intercept(self, X_offset, y_offset, X_scale): + # add the mean of X which was computed separately if X is sparse if getattr(self, '_X_offset', None) is not None: X_offset = X_offset + self._X_offset * X_scale super()._set_intercept(X_offset, y_offset, X_scale) @@ -1045,9 +1075,13 @@ def _pre_compute(self, X, y): K, X_m = _centered_gram(X, center) if self.fit_intercept: if self._with_sw: - K += np.outer( - self._sqrt_sw, self._sqrt_sw) + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + K += np.outer(self._sqrt_sw, self._sqrt_sw) else: + # with uniform sample weights we add a column of 1 K += 1. v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) @@ -1064,12 +1098,16 @@ def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): """ w = 1. / (v + alpha) if self.fit_intercept: + # the vector containing the square roots of the sample weights (1 + # when no sample weights) is the eigenvector of XX^T which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weights). 
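(A minimal standalone numpy sketch of the property stated in the comment above, assuming a dense X: after rescaling the rows by the square roots of the sample weights and centering with the weighted mean, sqrt(sample_weight) is an eigenvector of the intercept-augmented kernel, with eigenvalue sum(sample_weight).)

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(6, 3)
sw = rng.uniform(1., 3., size=6)            # sample weights
sqrt_sw = np.sqrt(sw)
X_mean = sw.dot(X) / sw.sum()               # weighted column means
Xc = sqrt_sw[:, None] * (X - X_mean)        # rescaled, centered design
K = Xc.dot(Xc.T) + np.outer(sqrt_sw, sqrt_sw)
assert np.allclose(K.dot(sqrt_sw), sw.sum() * sqrt_sw)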
if self._with_sw: intercept_dim = np.isclose( Q, self._normalized_sqrt_sw[:, None]).all(axis=0) else: intercept_dim = np.var(Q, 0) < 1.e-12 - # detect intercept w[intercept_dim] = 0 # cancel regularization for the intercept c = np.dot(Q, self._diag_dot(w, QT_y)) @@ -1093,6 +1131,11 @@ def _pre_compute_svd_sparse(self, X, y): cov[:-1, :-1], X_m = _centered_covariance(X, self.fit_intercept) if not self.fit_intercept: cov = cov[:-1, :-1] + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + # when all samples have the same weight we add a column of 1 else: cov[-1] = 0 cov[:, -1] = 0 @@ -1102,6 +1145,7 @@ def _pre_compute_svd_sparse(self, X, y): cov[-1, -1] = n kernel_size = max(0, X.shape[1] - X.shape[0]) s, V = linalg.eigh(cov) + # remove eigenvalues and vectors in the null space of X^T.X s = s[kernel_size:] V = V[:, kernel_size:] self._X_offset = X_m @@ -1109,6 +1153,7 @@ def _pre_compute_svd_sparse(self, X, y): def _errors_and_values_svd_helper_sparse_no_intercept( self, alpha, y, s, V, X): + """compute loo values and dual coef when X is sparse""" n, p = X.shape w = 1 / (s + alpha) A = (V * w).dot(V.T) @@ -1116,7 +1161,6 @@ def _errors_and_values_svd_helper_sparse_no_intercept( AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) y_hat = safe_sparse_dot(X, AXy, dense_output=True) hat_diag = _sparse_multidot_diag(X, A, Xm, False) - # return (1 - hat_diag), (y - y_hat) if len(y.shape) != 1: # handle case where y is 2-d hat_diag = hat_diag[:, np.newaxis] @@ -1126,8 +1170,17 @@ def _errors_and_values_svd_helper_sparse_intercept( self, alpha, y, s, V, X): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. + + compute loo values and dual coef when X is sparse and we fit an + intercept. + """ n, p = X.shape + # the vector [0, 0, ..., 0, 1] + # is the eigenvector of X^TX which + # corresponds to the intercept; we cancel the regularization on + # this dimension. the corresponding eigenvalue is + # sum(sample_weights), e.g. n when uniform sample weights. 
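(These helpers combine hat_diag and y_hat into the exact leave-one-out residuals (y - y_hat) / (1 - hat_diag); a minimal dense sketch of that identity for ridge, assuming no intercept and no sample weights.)

import numpy as np

rng = np.random.RandomState(0)
X, y, alpha = rng.randn(20, 5), rng.randn(20), 1.0
H = X.dot(np.linalg.inv(X.T.dot(X) + alpha * np.eye(5))).dot(X.T)
shortcut = (y - H.dot(y)) / (1 - np.diag(H))    # leave-one-out residuals
brute_force = np.empty_like(y)
for i in range(len(y)):
    mask = np.arange(len(y)) != i
    coef = np.linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(5),
                           X[mask].T.dot(y[mask]))
    brute_force[i] = y[i] - X[i].dot(coef)
assert np.allclose(shortcut, brute_force)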
intercept_sv = np.zeros(V.shape[0]) intercept_sv[-1] = 1 if self._with_sw: @@ -1138,21 +1191,25 @@ def _errors_and_values_svd_helper_sparse_intercept( w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) Xm = self._X_offset + # add a column to X containing the square roots of sample weights + sw = self._sqrt_sw if self._with_sw else np.ones( + X.shape[0], dtype=X.dtype) def matvec(v): return safe_sparse_dot( - X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] + X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] * sw def matmat(v): return safe_sparse_dot( - X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] + X, v[:-1], dense_output=True) - Xm.dot( + v[:-1]) + v[-1] * sw[:, None] def rmatvec(v): v = v.ravel() res = np.empty(X.shape[1] + 1) res[:-1] = safe_sparse_dot( X.T, v, dense_output=True) - Xm * v.sum(axis=0) - res[-1] = v.sum(axis=0) + res[-1] = np.dot(v, sw) return res Xop = sparse.linalg.LinearOperator( @@ -1160,7 +1217,8 @@ def rmatvec(v): shape=(X.shape[0], X.shape[1] + 1), dtype=X.dtype) AXy = A.dot(Xop.adjoint().dot(y)) y_hat = Xop.dot(AXy) - hat_diag = _sparse_multidot_diag(X, A, Xm) + hat_diag = _sparse_multidot_diag( + X, A, Xm, True, getattr(self, '_sqrt_sw', None)) # return (1 - hat_diag), (y - y_hat) if len(y.shape) != 1: # handle case where y is 2-d From bcf87e9a5ff84fbd89f896a54ed7643cc0761787 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 29 Apr 2019 17:25:21 +0200 Subject: [PATCH 031/103] duplicated line --- sklearn/linear_model/ridge.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 385c116405d31..e80e6fd8e9bf8 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -978,7 +978,6 @@ def _sparse_multidot_diag(X, A, Xm, with_intercept=True, intercept_col=None): X_batch[:, :-1] = X[batch].A - Xm if intercept_col is not None: X_batch[:, -1] = intercept_col[batch] - print(X_batch[:, -1]) else: X_batch = X[batch].A diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) @@ -1183,10 +1182,7 @@ def _errors_and_values_svd_helper_sparse_intercept( # sum(sample_weights), e.g. n when uniform sample weights. 
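(hat_diag above comes from _sparse_multidot_diag, which relies on diag(B.dot(A).dot(B.T)) == (B.dot(A) * B).sum(axis=1), evaluated in row batches so the n_samples x n_samples product is never materialized; a small dense sketch of that identity.)

import numpy as np

rng = np.random.RandomState(0)
B, A = rng.randn(50, 8), rng.randn(8, 8)
direct = np.diag(B.dot(A).dot(B.T))          # forms the full 50 x 50 product
batched = np.empty(50)
for start in range(0, 50, 8):
    batch = slice(start, min(50, start + 8))
    batched[batch] = (B[batch].dot(A) * B[batch]).sum(axis=1)
assert np.allclose(direct, batched)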
intercept_sv = np.zeros(V.shape[0]) intercept_sv[-1] = 1 - if self._with_sw: - intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) - else: - intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) + intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) w = 1 / (s + alpha) w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) From 4d58bf9a648bc24cfe8aecd26855e302c8b6ad7e Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 08:15:11 +0200 Subject: [PATCH 032/103] parametrize test_ridge_gcv_vs_k_fold --- sklearn/linear_model/tests/test_ridge.py | 52 +++++++++++++----------- 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index b6f8a8faf03f0..8b7b615476a95 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -311,31 +311,35 @@ def test_ridge_individual_penalties(): assert_raises(ValueError, ridge.fit, X, y) -def test_ridge_gcv_vs_k_fold(): +@pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) +@pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) +@pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)]) +@pytest.mark.parametrize('fit_intercept', [True, False]) +@pytest.mark.parametrize('normalize', [True, False]) +def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, + normalize): + n_samples, n_features = X_shape + X, y = make_regression( + n_samples=n_samples, n_features=n_features, n_targets=3, + random_state=0, shuffle=False, noise=30., n_informative=5 + ) + X += 30 * np.random.RandomState(0).randn(X.shape[1]) + alphas = [1e-3, .1, 1., 10., 1e3] - shapes = [(11, 8), (11, 20)] - for (n_samples, n_features), fit_intercept, normalize in product( - shapes, [True, False], [True, False]): - x, y = make_regression( - n_samples=n_samples, n_features=n_features, n_targets=3, - random_state=0, shuffle=False, noise=30., n_informative=5) - x += 30 * np.random.RandomState(0).randn(x.shape[1]) - x_s = sp.csr_matrix(x) - loo = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, - alphas=alphas, scoring='neg_mean_squared_error', - normalize=normalize) - loo.fit(x, y) - for gcv_mode, sparse_x in product(['svd', 'eigen'], [True, False]): - print('{}, {}, sparse: {}, intercept: {}, alpha: {}'.format( - (n_samples, n_features), gcv_mode, sparse_x, fit_intercept, - loo.alpha_)) - xx = x_s if sparse_x else x - gcv = RidgeCV(gcv_mode=gcv_mode, fit_intercept=fit_intercept, - alphas=alphas, normalize=normalize) - gcv.fit(xx, y) - assert gcv.alpha_ == loo.alpha_ - assert np.allclose(gcv.coef_, loo.coef_, rtol=1e-4) - assert np.allclose(gcv.intercept_, loo.intercept_, rtol=1e-4) + loo_ridge = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, + alphas=alphas, scoring='neg_mean_squared_error', + normalize=normalize) + gcv_ridge = RidgeCV(gcv_mode=gcv_mode, fit_intercept=fit_intercept, + alphas=alphas, normalize=normalize) + + loo_ridge.fit(X, y) + + X_gcv = X_constructor(X) + gcv_ridge.fit(X_gcv, y) + + assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_) + assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-4) + assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-4) def test_ridge_gcv_sample_weights(): From f89300801e92f089c3629ccb3d6ae2286179003f Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 08:18:15 +0200 Subject: [PATCH 033/103] better helper function names --- sklearn/linear_model/ridge.py | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index e80e6fd8e9bf8..32c9870ab2fab 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -919,7 +919,7 @@ def classes_(self): return self._label_binarizer.classes_ -def _centered_gram(X, center=True): +def _compute_gram(X, center=True): """Computes centered Gram matrix. Notes @@ -941,7 +941,7 @@ def _centered_gram(X, center=True): - X_mX - X_mX[:, None], X_m) -def _centered_covariance(X, center=True): +def _compute_covariance(X, center=True): """Computes centered Gram matrix. Notes @@ -1071,7 +1071,7 @@ def _set_intercept(self, X_offset, y_offset, X_scale): def _pre_compute(self, X, y): # if X is dense it has already been centered in preprocessing center = self.fit_intercept and sparse.issparse(X) - K, X_m = _centered_gram(X, center) + K, X_m = _compute_gram(X, center) if self.fit_intercept: if self._with_sw: # to emulate centering X with sample weights, @@ -1127,7 +1127,7 @@ def _values(self, alpha, y, v, Q, QT_y): def _pre_compute_svd_sparse(self, X, y): n, p = X.shape cov = np.empty((p + 1, p + 1)) - cov[:-1, :-1], X_m = _centered_covariance(X, self.fit_intercept) + cov[:-1, :-1], X_m = _compute_covariance(X, self.fit_intercept) if not self.fit_intercept: cov = cov[:-1, :-1] # to emulate centering X with sample weights, From d99fe7e314d9881622661a611b85511d13b5c106 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 10:03:04 +0200 Subject: [PATCH 034/103] handle sample weights with sparse X with gcv_mode='svd' --- sklearn/linear_model/ridge.py | 168 ++++++++++++++++++---------------- 1 file changed, 87 insertions(+), 81 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 32c9870ab2fab..5ea688aa7ed30 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -919,80 +919,16 @@ def classes_(self): return self._label_binarizer.classes_ -def _compute_gram(X, center=True): - """Computes centered Gram matrix. - - Notes - ----- - if center is True, compute - (X - X.mean(axis=0)).dot((X - X.mean(axis=0)).T) - else - X.dot(X.T) - """ - if not center: - X_m = np.zeros(X.shape[1], dtype=X.dtype) - return safe_sparse_dot(X, X.T, dense_output=True), X_m - if sparse.issparse(X): - X_m, _ = mean_variance_axis(X, axis=0) - else: - X_m = X.mean(axis=0) - X_mX = safe_sparse_dot(X_m, X.T, dense_output=True) - return (safe_sparse_dot(X, X.T, dense_output=True) + np.dot(X_m, X_m) - - X_mX - X_mX[:, None], X_m) - - -def _compute_covariance(X, center=True): - """Computes centered Gram matrix. 
- - Notes - ----- - if center is True, compute - (X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)) - else - X.T.dot(X) - """ - if not center: - X_m = np.zeros(X.shape[1], dtype=X.dtype) - return safe_sparse_dot(X.T, X, dense_output=True), X_m - n = X.shape[0] - if sparse.issparse(X): - X_m, _ = mean_variance_axis(X, axis=0) - else: - X_m = X.mean(axis=0) - return safe_sparse_dot( - X.T, X, dense_output=True) - n * np.outer(X_m, X_m), X_m - - -def _sparse_multidot_diag(X, A, Xm, with_intercept=True, intercept_col=None): - """ - compute the diagonal of (X - Xm).dot(A).dot((X - Xm).T) - when X is sparse, without storing X - Xm nor X.dot(A) - """ - batch_size = X.shape[1] - diag = np.empty(X.shape[0]) - for start in range(0, X.shape[0], batch_size): - batch = slice(start, min(X.shape[0], start + batch_size), 1) - X_batch = np.ones( - (X[batch].shape[0], X.shape[1] + with_intercept), dtype=X.dtype) - if with_intercept: - X_batch[:, :-1] = X[batch].A - Xm - if intercept_col is not None: - X_batch[:, -1] = intercept_col[batch] - else: - X_batch = X[batch].A - diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) - return diag - - def _check_gcv_mode(X, sample_weights): sparse_x = sparse.issparse(X) with_sample_weights = np.ndim(sample_weights) > 0 # sample weights not supported with sparse design yet, # because mean_variance_axis does not support sample weights if with_sample_weights and sparse_x: - raise ValueError( - 'sample weights not (yet) supported by ' - 'generalized cross-validation when X is sparse') + pass + # raise ValueError( + # 'sample weights not (yet) supported by ' + # 'generalized cross-validation when X is sparse') # if X has more rows than columns, use decomposition of X^T.X, # otherwise X.X^T if X.shape[0] > X.shape[1]: @@ -1062,6 +998,70 @@ def _diag_dot(self, D, B): D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B + def _compute_gram(self, X, center=True): + """Computes centered Gram matrix. + + Notes + ----- + if center is True, compute + (X - X.mean(axis=0)).dot((X - X.mean(axis=0)).T) + else + X.dot(X.T) + """ + if not center: + X_m = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X, X.T, dense_output=True), X_m + if sparse.issparse(X): + X_m, _ = mean_variance_axis(X, axis=0) + else: + X_m = X.mean(axis=0) + X_mX = safe_sparse_dot(X_m, X.T, dense_output=True) + return (safe_sparse_dot(X, X.T, dense_output=True) + np.dot(X_m, X_m) + - X_mX - X_mX[:, None], X_m) + + def _compute_covariance(self, X, center=True): + """Computes centered Gram matrix. 
+ + Notes + ----- + if center is True, compute + (X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)) + else + X.T.dot(X) + """ + if not center: + X_m = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X.T, X, dense_output=True), X_m + n = X.shape[0] + X_w = self._sqrt_sw_matrix.dot(X) + X_m, _ = mean_variance_axis(X_w, axis=0) + X_m = X_m * n / self._weight_sum + return safe_sparse_dot( + X.T, X, dense_output=True) - self._weight_sum * np.outer( + X_m, X_m), X_m + + def _sparse_multidot_diag(self, X, A, Xm): + """ + compute the diagonal of (X - Xm).dot(A).dot((X - Xm).T) + when X is sparse, without storing X - Xm nor X.dot(A) + """ + intercept_col = self._sqrt_sw + scale = self._sqrt_sw + batch_size = X.shape[1] + diag = np.empty(X.shape[0]) + for start in range(0, X.shape[0], batch_size): + batch = slice(start, min(X.shape[0], start + batch_size), 1) + X_batch = np.empty( + (X[batch].shape[0], X.shape[1] + self.fit_intercept), + dtype=X.dtype) + if self.fit_intercept: + X_batch[:, :-1] = X[batch].A - Xm * scale[batch][:, None] + X_batch[:, -1] = intercept_col[batch] + else: + X_batch = X[batch].A + diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) + return diag + def _set_intercept(self, X_offset, y_offset, X_scale): # add the mean of X which was computed separately if X is sparse if getattr(self, '_X_offset', None) is not None: @@ -1071,7 +1071,7 @@ def _set_intercept(self, X_offset, y_offset, X_scale): def _pre_compute(self, X, y): # if X is dense it has already been centered in preprocessing center = self.fit_intercept and sparse.issparse(X) - K, X_m = _compute_gram(X, center) + K, X_m = self._compute_gram(X, center) if self.fit_intercept: if self._with_sw: # to emulate centering X with sample weights, @@ -1127,7 +1127,7 @@ def _values(self, alpha, y, v, Q, QT_y): def _pre_compute_svd_sparse(self, X, y): n, p = X.shape cov = np.empty((p + 1, p + 1)) - cov[:-1, :-1], X_m = _compute_covariance(X, self.fit_intercept) + cov[:-1, :-1], X_m = self._compute_covariance(X, self.fit_intercept) if not self.fit_intercept: cov = cov[:-1, :-1] # to emulate centering X with sample weights, @@ -1159,7 +1159,7 @@ def _errors_and_values_svd_helper_sparse_no_intercept( Xm = self._X_offset AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) y_hat = safe_sparse_dot(X, AXy, dense_output=True) - hat_diag = _sparse_multidot_diag(X, A, Xm, False) + hat_diag = self._sparse_multidot_diag(X, A, Xm) if len(y.shape) != 1: # handle case where y is 2-d hat_diag = hat_diag[:, np.newaxis] @@ -1187,24 +1187,26 @@ def _errors_and_values_svd_helper_sparse_intercept( w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) Xm = self._X_offset + S = self._sqrt_sw_matrix # add a column to X containing the square roots of sample weights sw = self._sqrt_sw if self._with_sw else np.ones( X.shape[0], dtype=X.dtype) def matvec(v): return safe_sparse_dot( - X, v[:-1], dense_output=True) - Xm.dot(v[:-1]) + v[-1] * sw + X, v[:-1], dense_output=True + ) - sw * Xm.dot(v[:-1]) + v[-1] * sw def matmat(v): return safe_sparse_dot( - X, v[:-1], dense_output=True) - Xm.dot( - v[:-1]) + v[-1] * sw[:, None] + X, v[:-1], dense_output=True + ) - sw[:, None] * Xm.dot(v[:-1]) + v[-1] * sw[:, None] def rmatvec(v): v = v.ravel() res = np.empty(X.shape[1] + 1) res[:-1] = safe_sparse_dot( - X.T, v, dense_output=True) - Xm * v.sum(axis=0) + X.T, v, dense_output=True) - Xm * sw.dot(v) res[-1] = np.dot(v, sw) return res @@ -1213,8 +1215,7 @@ def rmatvec(v): shape=(X.shape[0], X.shape[1] + 1), dtype=X.dtype) AXy = 
A.dot(Xop.adjoint().dot(y)) y_hat = Xop.dot(AXy) - hat_diag = _sparse_multidot_diag( - X, A, Xm, True, getattr(self, '_sqrt_sw', None)) + hat_diag = self._sparse_multidot_diag(X, A, Xm) # return (1 - hat_diag), (y - y_hat) if len(y.shape) != 1: # handle case where y is 2-d @@ -1341,6 +1342,10 @@ def fit(self, X, y, sample_weight=None): self._with_sw = True else: self._with_sw = False + self._sqrt_sw = np.ones(X.shape[0], dtype=X.dtype) + self._weight_sum = X.shape[0] + self._sqrt_sw_matrix = sparse.dia_matrix( + (self._sqrt_sw, 0), shape=(X.shape[0], X.shape[0])) precomputed = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) @@ -1428,10 +1433,11 @@ def fit(self, X, y, sample_weight=None): gcv_modes = {None, 'auto', 'svd', 'eigen'} if self.cv in gcv_modes and sparse.issparse(X) and np.ndim( sample_weight): - warnings.warn( - 'sample weights with sparse X and gcv not supported, ' - 'falling back to 5-fold cross-validation') - cv = 5 + pass + # warnings.warn( + # 'sample weights with sparse X and gcv not supported, ' + # 'falling back to 5-fold cross-validation') + # cv = 5 if cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, From d826d323bdedc61f33a7d8dbc6a466a70a9fa17f Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 10:21:08 +0200 Subject: [PATCH 035/103] handle sparse x and sample weights when gcv_mode='eigen' --- sklearn/linear_model/ridge.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 5ea688aa7ed30..798cd024cde22 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1011,13 +1011,18 @@ def _compute_gram(self, X, center=True): if not center: X_m = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X, X.T, dense_output=True), X_m - if sparse.issparse(X): - X_m, _ = mean_variance_axis(X, axis=0) - else: - X_m = X.mean(axis=0) - X_mX = safe_sparse_dot(X_m, X.T, dense_output=True) - return (safe_sparse_dot(X, X.T, dense_output=True) + np.dot(X_m, X_m) - - X_mX - X_mX[:, None], X_m) + n = X.shape[0] + X_w = self._sqrt_sw_matrix.dot(X) + X_m, _ = mean_variance_axis(X_w, axis=0) + X_m = X_m * n / self._weight_sum + X_mX = self._sqrt_sw[:, None] * safe_sparse_dot( + X_m, X.T, dense_output=True) + X_mX_m = np.empty((X.shape[0], X.shape[0]), dtype=X.dtype) + X_mX_m[:, :] = np.dot(X_m, X_m) + X_mX_m = X_mX_m * self._sqrt_sw + X_mX_m = X_mX_m * self._sqrt_sw[:, None] + return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m + - X_mX - X_mX.T, X_m) def _compute_covariance(self, X, center=True): """Computes centered Gram matrix. 
From 84e3a6cf7c3fe67e7292a79dd763d3f38a3938bb Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 10:32:45 +0200 Subject: [PATCH 036/103] fix test + test sample weights with sparse x --- sklearn/linear_model/ridge.py | 1 - sklearn/linear_model/tests/test_ridge.py | 23 +++++++---------------- 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 798cd024cde22..4d30bd3bd621c 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1192,7 +1192,6 @@ def _errors_and_values_svd_helper_sparse_intercept( w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) Xm = self._X_offset - S = self._sqrt_sw_matrix # add a column to X containing the square roots of sample weights sw = self._sqrt_sw if self._with_sw else np.ones( X.shape[0], dtype=X.dtype) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 8b7b615476a95..2d718734860a6 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -359,24 +359,10 @@ def test_ridge_gcv_sample_weights(): alphas = [1.] ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=False) ridge.fit(tiled_x, tiled_y) - for gcv_mode in ['svd', 'eigen']: + for (gcv_mode, x_gcv) in product(['svd', 'eigen'], [x, x_s]): gcv = RidgeCV(fit_intercept=True, alphas=alphas, normalize=False, gcv_mode=gcv_mode) - gcv.fit(x, y, sample_weight=sample_weights) - assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) - assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) - - ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=True, - solver='sparse_cg') - # ridge.fit(sp.csr_matrix(tiled_x), tiled_y) - # TODO: once Ridge is fixed to handle correctly sparse x and sample - # weights, replace with the line above. 
For now we just check ridgecv and - # ridge give the same result - ridge.fit(x_s, y, sample_weight=sample_weights) - for gcv_mode in ['svd', 'eigen']: - gcv = RidgeCV(fit_intercept=True, scoring='neg_mean_squared_error', - alphas=alphas, normalize=True, gcv_mode=gcv_mode) - ignore_warnings(gcv.fit)(x_s, y, sample_weight=sample_weights) + gcv.fit(x_gcv, y, sample_weight=sample_weights) assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) @@ -394,6 +380,11 @@ def _test_ridge_loo(filter_): X_diabetes_ = X_diabetes ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) ridge_gcv._with_sw = False + ridge_gcv._sqrt_sw = np.ones(X_diabetes_.shape[0]) + ridge_gcv._sqrt_sw_matrix = sp.dia_matrix( + (ridge_gcv._sqrt_sw, 0), + shape=(X_diabetes_.shape[0], X_diabetes_.shape[0])) + ridge_gcv._weight_sum = X_diabetes_.shape[0] ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept) # because fit_intercept is applied From 7b21b3f87452722e0bef0b0893228de893a1de78 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 10:39:32 +0200 Subject: [PATCH 037/103] refactor gcv mode selection --- sklearn/linear_model/ridge.py | 30 ++++++------------------------ 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 4d30bd3bd621c..9039fe05083c6 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -919,16 +919,11 @@ def classes_(self): return self._label_binarizer.classes_ -def _check_gcv_mode(X, sample_weights): - sparse_x = sparse.issparse(X) - with_sample_weights = np.ndim(sample_weights) > 0 - # sample weights not supported with sparse design yet, - # because mean_variance_axis does not support sample weights - if with_sample_weights and sparse_x: - pass - # raise ValueError( - # 'sample weights not (yet) supported by ' - # 'generalized cross-validation when X is sparse') +def _check_gcv_mode(X, gcv_mode): + if gcv_mode not in {None, 'auto', 'svd', 'eigen'}: + raise ValueError('bad gcv_mode "%s"' % gcv_mode) + if gcv_mode not in {None, 'auto'}: + return gcv_mode # if X has more rows than columns, use decomposition of X^T.X, # otherwise X.X^T if X.shape[0] > X.shape[1]: @@ -1315,10 +1310,7 @@ def fit(self, X, y, sample_weight=None): X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) - gcv_mode = self.gcv_mode - best_gcv_mode = _check_gcv_mode(X, sample_weight) - if gcv_mode is None or gcv_mode == 'auto': - gcv_mode = best_gcv_mode + gcv_mode = _check_gcv_mode(X, self.gcv_mode) if gcv_mode == 'eigen': _pre_compute = self._pre_compute @@ -1334,8 +1326,6 @@ def fit(self, X, y, sample_weight=None): _pre_compute = self._pre_compute_svd_dense _errors = self._errors_svd_dense _values = self._values_svd_dense - else: - raise ValueError('bad gcv_mode "%s"' % gcv_mode) if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) @@ -1434,14 +1424,6 @@ def fit(self, X, y, sample_weight=None): self : object """ cv = self.cv - gcv_modes = {None, 'auto', 'svd', 'eigen'} - if self.cv in gcv_modes and sparse.issparse(X) and np.ndim( - sample_weight): - pass - # warnings.warn( - # 'sample weights with sparse X and gcv not supported, ' - # 'falling back to 5-fold cross-validation') - # cv = 5 if cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, From b1abf026fed265e18d8aa6f737c610eb3dfe77bc Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 10:45:21 +0200 Subject: 
[PATCH 038/103] parametrize sample_weights test + test for p > n and fit_intercept=0 --- sklearn/linear_model/tests/test_ridge.py | 26 +++++++++++++----------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 2d718734860a6..10706e8df2795 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -342,29 +342,31 @@ def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-4) -def test_ridge_gcv_sample_weights(): +@pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) +@pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) +@pytest.mark.parametrize('fit_intercept', [True, False]) +@pytest.mark.parametrize('n_features', [7, 31]) +def test_ridge_gcv_sample_weights( + gcv_mode, X_constructor, fit_intercept, n_features): x, y, c = make_regression( - n_samples=23, n_features=7, n_targets=4, coef=True, + n_samples=23, n_features=n_features, n_targets=4, coef=True, random_state=0, shuffle=False, noise=30.) x += 30 * np.random.RandomState(0).randn(x.shape[1]) - x_s = sp.csr_matrix(x) sample_weights = 3 * np.random.RandomState(0).randn(len(x)) sample_weights = np.asarray( sample_weights - sample_weights.min() + 1, dtype=int) indices = np.concatenate([n * [i] for (i, n) in enumerate(sample_weights)]) sample_weights = 1. * sample_weights tiled_x, tiled_y = x[indices], y[indices] - # loo scores won't be the same for expanded X and original X with sample - # weights so there must be only one value in the hyperparameter grid alphas = [1.] - ridge = Ridge(fit_intercept=True, alpha=alphas[0], normalize=False) + ridge = Ridge(fit_intercept=fit_intercept, alpha=alphas[0], normalize=False) ridge.fit(tiled_x, tiled_y) - for (gcv_mode, x_gcv) in product(['svd', 'eigen'], [x, x_s]): - gcv = RidgeCV(fit_intercept=True, alphas=alphas, - normalize=False, gcv_mode=gcv_mode) - gcv.fit(x_gcv, y, sample_weight=sample_weights) - assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) - assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) + x_gcv = X_constructor(x) + gcv = RidgeCV(fit_intercept=fit_intercept, alphas=alphas, + normalize=False, gcv_mode=gcv_mode) + gcv.fit(x_gcv, y, sample_weight=sample_weights) + assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) + assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) def _test_ridge_loo(filter_): From 5538d4fe2870ae6d9a230150f5f5fd5e6432ae27 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 10:47:13 +0200 Subject: [PATCH 039/103] pep8 --- sklearn/linear_model/tests/test_ridge.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 10706e8df2795..ce6f98db64d41 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -359,7 +359,8 @@ def test_ridge_gcv_sample_weights( sample_weights = 1. * sample_weights tiled_x, tiled_y = x[indices], y[indices] alphas = [1.] 
- ridge = Ridge(fit_intercept=fit_intercept, alpha=alphas[0], normalize=False) + ridge = Ridge( + fit_intercept=fit_intercept, alpha=alphas[0], normalize=False) ridge.fit(tiled_x, tiled_y) x_gcv = X_constructor(x) gcv = RidgeCV(fit_intercept=fit_intercept, alphas=alphas, From 2beac46f1d3917e29f47e980667c49231b4736b2 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 14:07:43 +0200 Subject: [PATCH 040/103] compare gcv+sample weights with groupkfold --- sklearn/linear_model/tests/test_ridge.py | 48 +++++++++++++++++------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index ce6f98db64d41..11ae59cc4c180 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -36,7 +36,7 @@ from sklearn.datasets import make_regression from sklearn.model_selection import GridSearchCV -from sklearn.model_selection import KFold +from sklearn.model_selection import KFold, GroupKFold, cross_val_predict from sklearn.utils import check_random_state from sklearn.datasets import make_multilabel_classification @@ -345,11 +345,13 @@ def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('fit_intercept', [True, False]) -@pytest.mark.parametrize('n_features', [7, 31]) +@pytest.mark.parametrize('n_features', [11, 69]) def test_ridge_gcv_sample_weights( gcv_mode, X_constructor, fit_intercept, n_features): - x, y, c = make_regression( - n_samples=23, n_features=n_features, n_targets=4, coef=True, + alphas = [1e-3, .1, 1., 10., 1e3] + + x, y, c = datasets.make_regression( + n_samples=53, n_features=n_features, n_targets=4, coef=True, random_state=0, shuffle=False, noise=30.) x += 30 * np.random.RandomState(0).randn(x.shape[1]) sample_weights = 3 * np.random.RandomState(0).randn(len(x)) @@ -358,16 +360,36 @@ def test_ridge_gcv_sample_weights( indices = np.concatenate([n * [i] for (i, n) in enumerate(sample_weights)]) sample_weights = 1. * sample_weights tiled_x, tiled_y = x[indices], y[indices] - alphas = [1.] 
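(These tests lean on the fact that, for Ridge, integer sample weights are equivalent to repeating each sample that many times, since the weighted squared-error term is identical and the penalty is unchanged; a small sketch of that equivalence.)

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X, y = rng.randn(10, 4), rng.randn(10)
sw = rng.randint(1, 4, size=10)             # integer sample weights
indices = np.repeat(np.arange(10), sw)      # repeat row i sw[i] times
weighted = Ridge(alpha=1.).fit(X, y, sample_weight=sw.astype(float))
tiled = Ridge(alpha=1.).fit(X[indices], y[indices])
assert np.allclose(weighted.coef_, tiled.coef_)
assert np.allclose(weighted.intercept_, tiled.intercept_)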
- ridge = Ridge( - fit_intercept=fit_intercept, alpha=alphas[0], normalize=False) - ridge.fit(tiled_x, tiled_y) + + cv = GroupKFold(n_splits=x.shape[0]) + splits = cv.split(tiled_x, tiled_y, groups=indices) + kfold = RidgeCV( + alphas=alphas, cv=splits, scoring='neg_mean_squared_error', + fit_intercept=fit_intercept) + with ignore_warnings(): + kfold.fit(tiled_x, tiled_y) + + ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept) + splits = cv.split(tiled_x, tiled_y, groups=indices) + predictions = cross_val_predict(ridge_reg, tiled_x, tiled_y, cv=splits) + kfold_errors = (tiled_y - predictions)**2 + kfold_errors = [ + np.sum(kfold_errors[indices == i], axis=0) for + i in np.arange(x.shape[0])] + kfold_errors = np.asarray(kfold_errors) + x_gcv = X_constructor(x) - gcv = RidgeCV(fit_intercept=fit_intercept, alphas=alphas, - normalize=False, gcv_mode=gcv_mode) - gcv.fit(x_gcv, y, sample_weight=sample_weights) - assert np.allclose(gcv.coef_, ridge.coef_, rtol=1e-2) - assert np.allclose(gcv.intercept_, ridge.intercept_, rtol=1e-2) + ridge_gcv = RidgeCV( + alphas=alphas, store_cv_values=True, + gcv_mode=gcv_mode, fit_intercept=fit_intercept) + ridge_gcv.fit(x_gcv, y, sample_weight=sample_weights) + gcv_errors = ridge_gcv.cv_values_[:, :, alphas.index(kfold.alpha_)] + + assert kfold.alpha_ == ridge_gcv.alpha_ + assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) + assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) + assert_allclose(ridge_gcv.coef_, kfold.coef_, rtol=5e-2) + assert_allclose(ridge_gcv.intercept_, kfold.intercept_, rtol=5e-2) def _test_ridge_loo(filter_): From bad249e0da29968f6c5fb7894f315afa7091de22 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 14:41:46 +0200 Subject: [PATCH 041/103] address @glemaitre review --- sklearn/linear_model/tests/test_ridge.py | 65 ++++++++++++------------ 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 11ae59cc4c180..0ce3578a49467 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -349,47 +349,46 @@ def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, def test_ridge_gcv_sample_weights( gcv_mode, X_constructor, fit_intercept, n_features): alphas = [1e-3, .1, 1., 10., 1e3] - - x, y, c = datasets.make_regression( - n_samples=53, n_features=n_features, n_targets=4, coef=True, + rng = np.random.RandomState(0) + x, y = datasets.make_regression( + n_samples=59, n_features=n_features, n_targets=4, random_state=0, shuffle=False, noise=30.) - x += 30 * np.random.RandomState(0).randn(x.shape[1]) - sample_weights = 3 * np.random.RandomState(0).randn(len(x)) - sample_weights = np.asarray( - sample_weights - sample_weights.min() + 1, dtype=int) - indices = np.concatenate([n * [i] for (i, n) in enumerate(sample_weights)]) - sample_weights = 1. 
* sample_weights - tiled_x, tiled_y = x[indices], y[indices] + x += 30 * rng.randn(x.shape[1]) + sample_weight = 3 * rng.randn(len(x)) + sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) + indices = np.repeat(np.arange(x.shape[0]), sample_weight) + sample_weight = sample_weight.astype(float) + X_tiled, y_tiled = x[indices], y[indices] cv = GroupKFold(n_splits=x.shape[0]) - splits = cv.split(tiled_x, tiled_y, groups=indices) + splits = cv.split(X_tiled, y_tiled, groups=indices) kfold = RidgeCV( alphas=alphas, cv=splits, scoring='neg_mean_squared_error', fit_intercept=fit_intercept) with ignore_warnings(): - kfold.fit(tiled_x, tiled_y) + kfold.fit(X_tiled, y_tiled) ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept) - splits = cv.split(tiled_x, tiled_y, groups=indices) - predictions = cross_val_predict(ridge_reg, tiled_x, tiled_y, cv=splits) - kfold_errors = (tiled_y - predictions)**2 + splits = cv.split(X_tiled, y_tiled, groups=indices) + predictions = cross_val_predict(ridge_reg, X_tiled, y_tiled, cv=splits) + kfold_errors = (y_tiled - predictions)**2 kfold_errors = [ np.sum(kfold_errors[indices == i], axis=0) for i in np.arange(x.shape[0])] kfold_errors = np.asarray(kfold_errors) x_gcv = X_constructor(x) - ridge_gcv = RidgeCV( + gcv_ridge = RidgeCV( alphas=alphas, store_cv_values=True, gcv_mode=gcv_mode, fit_intercept=fit_intercept) - ridge_gcv.fit(x_gcv, y, sample_weight=sample_weights) - gcv_errors = ridge_gcv.cv_values_[:, :, alphas.index(kfold.alpha_)] + gcv_ridge.fit(x_gcv, y, sample_weight=sample_weight) + gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] - assert kfold.alpha_ == ridge_gcv.alpha_ + assert kfold.alpha_ == gcv_ridge.alpha_ assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) - assert_allclose(ridge_gcv.coef_, kfold.coef_, rtol=5e-2) - assert_allclose(ridge_gcv.intercept_, kfold.intercept_, rtol=5e-2) + assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=5e-2) + assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) def _test_ridge_loo(filter_): @@ -433,8 +432,8 @@ def _test_ridge_loo(filter_): values2.append(value) # check that efficient and brute-force LOO give same results - assert_almost_equal(errors, errors2) - assert_almost_equal(values, values2) + assert errors == pytest.approx(errors2) + assert values == pytest.approx(values2) # generalized cross-validation (efficient leave-one-out, # SVD variation) @@ -443,8 +442,8 @@ def _test_ridge_loo(filter_): values3, c = ridge_gcv._values_svd_dense(ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results - assert_almost_equal(errors, errors3) - assert_almost_equal(values, values3) + assert errors == pytest.approx(errors3) + assert values == pytest.approx(values3) # generalized cross-validation (efficient leave-one-out, # SVD variation) @@ -454,8 +453,8 @@ def _test_ridge_loo(filter_): values4, c = ridge_gcv._values_svd_sparse(ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results - assert_almost_equal(errors, errors4) - assert_almost_equal(values, values4) + assert errors == pytest.approx(errors4) + assert values == pytest.approx(values4) # check best alpha ridge_gcv.fit(filter_(X_diabetes), y_diabetes) @@ -467,26 +466,26 @@ def _test_ridge_loo(filter_): scoring = make_scorer(mean_squared_error, greater_is_better=False) ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring) f(ridge_gcv2.fit)(filter_(X_diabetes), 
y_diabetes) - assert_equal(ridge_gcv2.alpha_, alpha_) + assert ridge_gcv2.alpha_ == pytest.approx(alpha_) # check that we get same best alpha with custom score_func func = lambda x, y: -mean_squared_error(x, y) scoring = make_scorer(func) ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring) f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes) - assert_equal(ridge_gcv3.alpha_, alpha_) + assert ridge_gcv3.alpha_ == pytest.approx(alpha_) # check that we get same best alpha with a scorer scorer = get_scorer('neg_mean_squared_error') ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer) ridge_gcv4.fit(filter_(X_diabetes), y_diabetes) - assert_equal(ridge_gcv4.alpha_, alpha_) + assert ridge_gcv4.alpha_ == pytest.approx(alpha_) # check that we get same best alpha with sample weights if filter_ == DENSE_FILTER: ridge_gcv.fit(filter_(X_diabetes), y_diabetes, sample_weight=np.ones(n_samples)) - assert_equal(ridge_gcv.alpha_, alpha_) + assert ridge_gcv.alpha_ == pytest.approx(alpha_) # simulate several responses Y = np.vstack((y_diabetes, y_diabetes)).T @@ -496,8 +495,8 @@ def _test_ridge_loo(filter_): ridge_gcv.fit(filter_(X_diabetes), y_diabetes) y_pred = ridge_gcv.predict(filter_(X_diabetes)) - assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, - Y_pred, decimal=5) + assert_allclose(np.vstack((y_pred, y_pred)).T, + Y_pred, rtol=1e-5) return ret From d8ad9b33af4162e3e4eb8661b153760987e6ae72 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 14:52:19 +0200 Subject: [PATCH 042/103] test with small noise --- sklearn/linear_model/tests/test_ridge.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 0ce3578a49467..e9ec267487cae 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -316,12 +316,13 @@ def test_ridge_individual_penalties(): @pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)]) @pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) +@pytest.mark.parametrize('noise', [1., 30.]) def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, - normalize): + normalize, noise): n_samples, n_features = X_shape X, y = make_regression( n_samples=n_samples, n_features=n_features, n_targets=3, - random_state=0, shuffle=False, noise=30., n_informative=5 + random_state=0, shuffle=False, noise=noise, n_informative=5 ) X += 30 * np.random.RandomState(0).randn(X.shape[1]) @@ -346,13 +347,14 @@ def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize('n_features', [11, 69]) +@pytest.mark.parametrize('noise', [1., 30.]) def test_ridge_gcv_sample_weights( - gcv_mode, X_constructor, fit_intercept, n_features): + gcv_mode, X_constructor, fit_intercept, n_features, noise): alphas = [1e-3, .1, 1., 10., 1e3] rng = np.random.RandomState(0) x, y = datasets.make_regression( n_samples=59, n_features=n_features, n_targets=4, - random_state=0, shuffle=False, noise=30.) 
+ random_state=0, shuffle=False, noise=noise) x += 30 * rng.randn(x.shape[1]) sample_weight = 3 * rng.randn(len(x)) sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) From a5b62c8c749b2b86754399f3a67037ccda69f223 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 16:19:39 +0200 Subject: [PATCH 043/103] address comments by @glemaitre and @thomasjpfan --- sklearn/linear_model/ridge.py | 95 +++++++++++++++++++++++------------ 1 file changed, 62 insertions(+), 33 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 9039fe05083c6..7af2c34743ce7 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -920,8 +920,12 @@ def classes_(self): def _check_gcv_mode(X, gcv_mode): - if gcv_mode not in {None, 'auto', 'svd', 'eigen'}: - raise ValueError('bad gcv_mode "%s"' % gcv_mode) + possible_gcv_modes = {None, 'auto', 'svd', 'eigen'} + if gcv_mode not in possible_gcv_modes: + raise ValueError( + "Unknown value for 'gcv_mode'. " + "Got {} instead of one of {}" .format( + gcv_mode, possible_gcv_modes)) if gcv_mode not in {None, 'auto'}: return gcv_mode # if X has more rows than columns, use decomposition of X^T.X, @@ -994,30 +998,41 @@ def _diag_dot(self, D, B): return D * B def _compute_gram(self, X, center=True): - """Computes centered Gram matrix. + """Computes the Gram matrix with possible centering. - Notes - ----- - if center is True, compute + If ``center`` is ``True``, compute (X - X.mean(axis=0)).dot((X - X.mean(axis=0)).T) - else - X.dot(X.T) + else X.dot(X.T) + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + The input uncentered data. + center : bool, default is True + Whether or not to remove the mean from ``X``. + + Returns + ------- + gram : ndarray, shape (n_samples, n_samples) + The Gram matrix. + X_m : ndarray, shape (n_feature,) + The mean of ``X`` for each feature. """ if not center: X_m = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X, X.T, dense_output=True), X_m - n = X.shape[0] - X_w = self._sqrt_sw_matrix.dot(X) - X_m, _ = mean_variance_axis(X_w, axis=0) - X_m = X_m * n / self._weight_sum + n_samples = X.shape[0] + X_weighted = self._sqrt_sw_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean *= n_samples / self._weight_sum X_mX = self._sqrt_sw[:, None] * safe_sparse_dot( - X_m, X.T, dense_output=True) - X_mX_m = np.empty((X.shape[0], X.shape[0]), dtype=X.dtype) - X_mX_m[:, :] = np.dot(X_m, X_m) - X_mX_m = X_mX_m * self._sqrt_sw - X_mX_m = X_mX_m * self._sqrt_sw[:, None] + X_mean, X.T, dense_output=True) + X_mX_m = np.empty((n_samples, n_samples), dtype=X.dtype) + X_mX_m[:, :] = np.dot(X_mean, X_mean) + X_mX_m *= self._sqrt_sw + X_mX_m *= self._sqrt_sw[:, None] return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - - X_mX - X_mX.T, X_m) + - X_mX - X_mX.T, X_mean) def _compute_covariance(self, X, center=True): """Computes centered Gram matrix. @@ -1028,17 +1043,31 @@ def _compute_covariance(self, X, center=True): (X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)) else X.T.dot(X) + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + The input uncentered data. + center : bool, default is True + Whether or not to remove the mean from ``X``. + + Returns + ------- + covariance : ndarray, shape (n_features, n_features) + The covariance matrix. + X_m : ndarray, shape (n_feature,) + The mean of ``X`` for each feature. 
""" if not center: X_m = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X.T, X, dense_output=True), X_m - n = X.shape[0] - X_w = self._sqrt_sw_matrix.dot(X) - X_m, _ = mean_variance_axis(X_w, axis=0) - X_m = X_m * n / self._weight_sum - return safe_sparse_dot( - X.T, X, dense_output=True) - self._weight_sum * np.outer( - X_m, X_m), X_m + n_samples = X.shape[0] + X_weighted = self._sqrt_sw_matrix.dot(X) + X_mean, _ = mean_variance_axis(X_weighted, axis=0) + X_mean = X_mean * n_samples / self._weight_sum + return (safe_sparse_dot(X.T, X, dense_output=True) - + self._weight_sum * np.outer(X_mean, X_mean), + X_mean) def _sparse_multidot_diag(self, X, A, Xm): """ @@ -1101,7 +1130,7 @@ def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): # when no sample weights) is the eigenvector of XX^T which # corresponds to the intercept; we cancel the regularization on # this dimension. the corresponding eigenvalue is - # sum(sample_weights). + # sum(sample_weight). if self._with_sw: intercept_dim = np.isclose( Q, self._normalized_sqrt_sw[:, None]).all(axis=0) @@ -1179,7 +1208,7 @@ def _errors_and_values_svd_helper_sparse_intercept( # is the eigenvector of X^TX which # corresponds to the intercept; we cancel the regularization on # this dimension. the corresponding eigenvalue is - # sum(sample_weights), e.g. n when uniform sample weights. + # sum(sample_weight), e.g. n when uniform sample weights. intercept_sv = np.zeros(V.shape[0]) intercept_sv[-1] = 1 intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) @@ -1300,6 +1329,11 @@ def fit(self, X, y, sample_weight=None): ------- self : object """ + if np.any(self.alphas <= 0): + raise ValueError( + "alphas must be positive. Got {} containing some " + "negative or null value instead.".format(self.alphas)) + X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64, multi_output=True, y_numeric=True) if sample_weight is not None and not isinstance(sample_weight, float): @@ -1340,7 +1374,6 @@ def fit(self, X, y, sample_weight=None): self._weight_sum = X.shape[0] self._sqrt_sw_matrix = sparse.dia_matrix( (self._sqrt_sw, 0), shape=(X.shape[0], X.shape[0])) - precomputed = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] @@ -1348,11 +1381,7 @@ def fit(self, X, y, sample_weight=None): scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None - if np.any(self.alphas <= 0): - raise ValueError( - "alphas must be positive. Got {} containing some " - "negative or null value instead.".format(self.alphas)) - + precomputed = _pre_compute(X, y) for i, alpha in enumerate(self.alphas): if error: out, c = _errors(float(alpha), y, *precomputed) From c99ff0691bc7e248356e7004a7c5a8caedb24027 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 16:23:44 +0200 Subject: [PATCH 044/103] fix docstring --- sklearn/linear_model/ridge.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 7af2c34743ce7..c1eff9173da41 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1035,10 +1035,8 @@ def _compute_gram(self, X, center=True): - X_mX - X_mX.T, X_mean) def _compute_covariance(self, X, center=True): - """Computes centered Gram matrix. + """Computes centered covariance matrix. 
- Notes - ----- if center is True, compute (X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)) else From e35c648705b0babd62d4be08bf86e9cb23393576 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 30 Apr 2019 16:27:06 +0200 Subject: [PATCH 045/103] comments --- sklearn/linear_model/ridge.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index c1eff9173da41..bbc59da1aeec5 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1019,8 +1019,11 @@ def _compute_gram(self, X, center=True): The mean of ``X`` for each feature. """ if not center: - X_m = np.zeros(X.shape[1], dtype=X.dtype) - return safe_sparse_dot(X, X.T, dense_output=True), X_m + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X, X.T, dense_output=True), X_mean + # otherwise X is always sparse n_samples = X.shape[0] X_weighted = self._sqrt_sw_matrix.dot(X) X_mean, _ = mean_variance_axis(X_weighted, axis=0) @@ -1057,8 +1060,11 @@ def _compute_covariance(self, X, center=True): The mean of ``X`` for each feature. """ if not center: - X_m = np.zeros(X.shape[1], dtype=X.dtype) - return safe_sparse_dot(X.T, X, dense_output=True), X_m + # in this case centering has been done in preprocessing + # or we are not fitting an intercept. + X_mean = np.zeros(X.shape[1], dtype=X.dtype) + return safe_sparse_dot(X.T, X, dense_output=True), X_mean + # otherwise X is always sparse n_samples = X.shape[0] X_weighted = self._sqrt_sw_matrix.dot(X) X_mean, _ = mean_variance_axis(X_weighted, axis=0) From 58c0cc35560c1cd1581f7381434cfd72242edb9f Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Wed, 1 May 2019 16:10:24 +0200 Subject: [PATCH 046/103] Apply suggestions from code review Co-Authored-By: jeromedockes --- sklearn/linear_model/ridge.py | 41 ++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index bbc59da1aeec5..172ad8612d230 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1040,7 +1040,7 @@ def _compute_gram(self, X, center=True): def _compute_covariance(self, X, center=True): """Computes centered covariance matrix. 
- if center is True, compute + If ``center`` is ``True``, compute (X - X.mean(axis=0)).T.dot(X - X.mean(axis=0)) else X.T.dot(X) @@ -1073,7 +1073,7 @@ def _compute_covariance(self, X, center=True): self._weight_sum * np.outer(X_mean, X_mean), X_mean) - def _sparse_multidot_diag(self, X, A, Xm): + def _sparse_multidot_diag(self, X, A, X_mean): """ compute the diagonal of (X - Xm).dot(A).dot((X - Xm).T) when X is sparse, without storing X - Xm nor X.dot(A) @@ -1086,9 +1086,10 @@ def _sparse_multidot_diag(self, X, A, Xm): batch = slice(start, min(X.shape[0], start + batch_size), 1) X_batch = np.empty( (X[batch].shape[0], X.shape[1] + self.fit_intercept), - dtype=X.dtype) + dtype=X.dtype + ) if self.fit_intercept: - X_batch[:, :-1] = X[batch].A - Xm * scale[batch][:, None] + X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None] X_batch[:, -1] = intercept_col[batch] else: X_batch = X[batch].A @@ -1098,7 +1099,7 @@ def _sparse_multidot_diag(self, X, A, Xm): def _set_intercept(self, X_offset, y_offset, X_scale): # add the mean of X which was computed separately if X is sparse if getattr(self, '_X_offset', None) is not None: - X_offset = X_offset + self._X_offset * X_scale + X_offset += self._X_offset * X_scale super()._set_intercept(X_offset, y_offset, X_scale) def _pre_compute(self, X, y): @@ -1158,9 +1159,9 @@ def _values(self, alpha, y, v, Q, QT_y): return y - (c / G_diag), c def _pre_compute_svd_sparse(self, X, y): - n, p = X.shape - cov = np.empty((p + 1, p + 1)) - cov[:-1, :-1], X_m = self._compute_covariance(X, self.fit_intercept) + n_samples, n_features = X.shape + cov = np.empty((n_features + 1, n_features + 1)) + cov[:-1, :-1], self._X_offset = self._compute_covariance(X, self.fit_intercept) if not self.fit_intercept: cov = cov[:-1, :-1] # to emulate centering X with sample weights, @@ -1174,25 +1175,24 @@ def _pre_compute_svd_sparse(self, X, y): if self._with_sw: cov[-1, -1] = self._weight_sum else: - cov[-1, -1] = n + cov[-1, -1] = n_samples kernel_size = max(0, X.shape[1] - X.shape[0]) s, V = linalg.eigh(cov) # remove eigenvalues and vectors in the null space of X^T.X s = s[kernel_size:] V = V[:, kernel_size:] - self._X_offset = X_m return s, V, X def _errors_and_values_svd_helper_sparse_no_intercept( self, alpha, y, s, V, X): """compute loo values and dual coef when X is sparse""" - n, p = X.shape + n_samples, n_features = X.shape w = 1 / (s + alpha) A = (V * w).dot(V.T) - Xm = self._X_offset + X_mean = self._X_offset AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) y_hat = safe_sparse_dot(X, AXy, dense_output=True) - hat_diag = self._sparse_multidot_diag(X, A, Xm) + hat_diag = self._sparse_multidot_diag(X, A, X_mean) if len(y.shape) != 1: # handle case where y is 2-d hat_diag = hat_diag[:, np.newaxis] @@ -1207,7 +1207,7 @@ def _errors_and_values_svd_helper_sparse_intercept( intercept. 
""" - n, p = X.shape + n_samples, n_features = X.shape # the vector [0, 0, ..., 0, 1] # is the eigenvector of X^TX which # corresponds to the intercept; we cancel the regularization on @@ -1219,7 +1219,7 @@ def _errors_and_values_svd_helper_sparse_intercept( w = 1 / (s + alpha) w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) - Xm = self._X_offset + X_mean = self._X_offset # add a column to X containing the square roots of sample weights sw = self._sqrt_sw if self._with_sw else np.ones( X.shape[0], dtype=X.dtype) @@ -1227,24 +1227,25 @@ def _errors_and_values_svd_helper_sparse_intercept( def matvec(v): return safe_sparse_dot( X, v[:-1], dense_output=True - ) - sw * Xm.dot(v[:-1]) + v[-1] * sw + ) - sw * X_mean.dot(v[:-1]) + v[-1] * sw def matmat(v): return safe_sparse_dot( X, v[:-1], dense_output=True - ) - sw[:, None] * Xm.dot(v[:-1]) + v[-1] * sw[:, None] + ) - sw[:, None] * X_mean.dot(v[:-1]) + v[-1] * sw[:, None] def rmatvec(v): v = v.ravel() - res = np.empty(X.shape[1] + 1) + res = np.empty(n_features + 1) res[:-1] = safe_sparse_dot( - X.T, v, dense_output=True) - Xm * sw.dot(v) + X.T, v, dense_output=True) - X_mean * sw.dot(v) res[-1] = np.dot(v, sw) return res Xop = sparse.linalg.LinearOperator( matvec=matvec, matmat=matmat, rmatvec=rmatvec, - shape=(X.shape[0], X.shape[1] + 1), dtype=X.dtype) + shape=(n_samples, n_feautres + 1), dtype=X.dtype + ) AXy = A.dot(Xop.adjoint().dot(y)) y_hat = Xop.dot(AXy) hat_diag = self._sparse_multidot_diag(X, A, Xm) From 080ddeff94668bea0e114ab54c9ae55a9345ab61 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 16:13:16 +0200 Subject: [PATCH 047/103] fix variable name --- sklearn/linear_model/ridge.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 172ad8612d230..8fc1d67e0633f 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1244,11 +1244,11 @@ def rmatvec(v): Xop = sparse.linalg.LinearOperator( matvec=matvec, matmat=matmat, rmatvec=rmatvec, - shape=(n_samples, n_feautres + 1), dtype=X.dtype + shape=(n_samples, n_features + 1), dtype=X.dtype ) AXy = A.dot(Xop.adjoint().dot(y)) y_hat = Xop.dot(AXy) - hat_diag = self._sparse_multidot_diag(X, A, Xm) + hat_diag = self._sparse_multidot_diag(X, A, X_mean) # return (1 - hat_diag), (y - y_hat) if len(y.shape) != 1: # handle case where y is 2-d From 5a1452743c9f71481a223099acbf261c7cd3bfc0 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 16:31:57 +0200 Subject: [PATCH 048/103] better names for _RidgeGCV private helper functions --- sklearn/linear_model/ridge.py | 71 ++++++++++++------------ sklearn/linear_model/tests/test_ridge.py | 38 +++++++------ 2 files changed, 57 insertions(+), 52 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 8fc1d67e0633f..d160582b91684 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1102,7 +1102,7 @@ def _set_intercept(self, X_offset, y_offset, X_scale): X_offset += self._X_offset * X_scale super()._set_intercept(X_offset, y_offset, X_scale) - def _pre_compute(self, X, y): + def _decompose_gram(self, X, y): # if X is dense it has already been centered in preprocessing center = self.fit_intercept and sparse.issparse(X) K, X_m = self._compute_gram(X, center) @@ -1121,7 +1121,7 @@ def _pre_compute(self, X, y): self._X_offset = X_m return v, Q, QT_y - def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): + def 
_errors_and_values_gram(self, alpha, y, v, Q, QT_y): """Helper function to avoid code duplication between self._errors and self._values. @@ -1150,18 +1150,19 @@ def _errors_and_values_helper(self, alpha, y, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors(self, alpha, y, v, Q, QT_y): - G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y) + def _errors_gram(self, alpha, y, v, Q, QT_y): + G_diag, c = self._errors_and_values_gram(alpha, y, v, Q, QT_y) return (c / G_diag) ** 2, c - def _values(self, alpha, y, v, Q, QT_y): - G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y) + def _values_gram(self, alpha, y, v, Q, QT_y): + G_diag, c = self._errors_and_values_gram(alpha, y, v, Q, QT_y) return y - (c / G_diag), c - def _pre_compute_svd_sparse(self, X, y): + def _decompose_covariance_sparse(self, X, y): n_samples, n_features = X.shape cov = np.empty((n_features + 1, n_features + 1)) - cov[:-1, :-1], self._X_offset = self._compute_covariance(X, self.fit_intercept) + cov[:-1, :-1], self._X_offset = self._compute_covariance( + X, self.fit_intercept) if not self.fit_intercept: cov = cov[:-1, :-1] # to emulate centering X with sample weights, @@ -1183,7 +1184,7 @@ def _pre_compute_svd_sparse(self, X, y): V = V[:, kernel_size:] return s, V, X - def _errors_and_values_svd_helper_sparse_no_intercept( + def _errors_and_values_covariance_sparse_no_intercept( self, alpha, y, s, V, X): """compute loo values and dual coef when X is sparse""" n_samples, n_features = X.shape @@ -1198,7 +1199,7 @@ def _errors_and_values_svd_helper_sparse_no_intercept( hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_svd_helper_sparse_intercept( + def _errors_and_values_covariance_sparse_intercept( self, alpha, y, s, V, X): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. @@ -1255,14 +1256,14 @@ def rmatvec(v): hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_svd_helper_sparse(self, alpha, y, s, V, X): + def _errors_and_values_covariance_sparse(self, alpha, y, s, V, X): if self.fit_intercept: - return self._errors_and_values_svd_helper_sparse_intercept( + return self._errors_and_values_covariance_sparse_intercept( alpha, y, s, V, X) - return self._errors_and_values_svd_helper_sparse_no_intercept( + return self._errors_and_values_covariance_sparse_no_intercept( alpha, y, s, V, X) - def _pre_compute_svd_dense(self, X, y): + def _decompose_covariance_dense(self, X, y): if self.fit_intercept: if self._with_sw: intercept = self._sqrt_sw[:, None] @@ -1276,7 +1277,7 @@ def _pre_compute_svd_dense(self, X, y): UT_y = np.dot(U.T, y) return v, U, UT_y - def _errors_and_values_svd_helper_dense(self, alpha, y, v, U, UT_y): + def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. 
""" @@ -1296,23 +1297,23 @@ def _errors_and_values_svd_helper_dense(self, alpha, y, v, U, UT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_svd_sparse(self, alpha, y, v, U, UT_y): - G_diag, c = self._errors_and_values_svd_helper_sparse( + def _errors_covariance_sparse(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_covariance_sparse( alpha, y, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_svd_sparse(self, alpha, y, v, U, UT_y): - G_diag, c = self._errors_and_values_svd_helper_sparse( + def _values_covariance_sparse(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_covariance_sparse( alpha, y, v, U, UT_y) return y - (c / G_diag), c - def _errors_svd_dense(self, alpha, y, v, U, UT_y): - G_diag, c = self._errors_and_values_svd_helper_dense( + def _errors_covariance_dense(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_covariance_dense( alpha, y, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_svd_dense(self, alpha, y, v, U, UT_y): - G_diag, c = self._errors_and_values_svd_helper_dense( + def _values_covariance_dense(self, alpha, y, v, U, UT_y): + G_diag, c = self._errors_and_values_covariance_dense( alpha, y, v, U, UT_y) return y - (c / G_diag), c @@ -1352,19 +1353,19 @@ def fit(self, X, y, sample_weight=None): gcv_mode = _check_gcv_mode(X, self.gcv_mode) if gcv_mode == 'eigen': - _pre_compute = self._pre_compute - _errors = self._errors - _values = self._values + _decompose = self._decompose_gram + _errors = self._errors_gram + _values = self._values_gram elif gcv_mode == 'svd': # assert n_samples >= n_features if sparse.issparse(X): - _pre_compute = self._pre_compute_svd_sparse - _errors = self._errors_svd_sparse - _values = self._values_svd_sparse + _decompose = self._decompose_covariance_sparse + _errors = self._errors_covariance_sparse + _values = self._values_covariance_sparse else: - _pre_compute = self._pre_compute_svd_dense - _errors = self._errors_svd_dense - _values = self._values_svd_dense + _decompose = self._decompose_covariance_dense + _errors = self._errors_covariance_dense + _values = self._values_covariance_dense if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) @@ -1386,12 +1387,12 @@ def fit(self, X, y, sample_weight=None): scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None - precomputed = _pre_compute(X, y) + decomposition = _decompose(X, y) for i, alpha in enumerate(self.alphas): if error: - out, c = _errors(float(alpha), y, *precomputed) + out, c = _errors(float(alpha), y, *decomposition) else: - out, c = _values(float(alpha), y, *precomputed) + out, c = _values(float(alpha), y, *decomposition) cv_values[:, i] = out.ravel() C.append(c) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index e9ec267487cae..cb3108b25d19d 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -416,9 +416,9 @@ def _test_ridge_loo(filter_): # because fit_intercept is applied # generalized cross-validation (efficient leave-one-out) - decomp = ridge_gcv._pre_compute(X_diabetes_, y_diabetes) - errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp) - values, c = ridge_gcv._values(1.0, y_diabetes, *decomp) + decomp = ridge_gcv._decompose_gram(X_diabetes_, y_diabetes) + errors, c = ridge_gcv._errors_gram(1.0, y_diabetes, *decomp) + values, c = ridge_gcv._values_gram(1.0, y_diabetes, *decomp) # brute-force leave-one-out: remove one example at a time 
errors2 = [] @@ -439,9 +439,11 @@ def _test_ridge_loo(filter_): # generalized cross-validation (efficient leave-one-out, # SVD variation) - decomp = ridge_gcv._pre_compute_svd_dense(X_diabetes_, y_diabetes) - errors3, c = ridge_gcv._errors_svd_dense(ridge.alpha, y_diabetes, *decomp) - values3, c = ridge_gcv._values_svd_dense(ridge.alpha, y_diabetes, *decomp) + decomp = ridge_gcv._decompose_covariance_dense(X_diabetes_, y_diabetes) + errors3, c = ridge_gcv._errors_covariance_dense( + ridge.alpha, y_diabetes, *decomp) + values3, c = ridge_gcv._values_covariance_dense( + ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results assert errors == pytest.approx(errors3) @@ -449,10 +451,12 @@ def _test_ridge_loo(filter_): # generalized cross-validation (efficient leave-one-out, # SVD variation) - decomp = ridge_gcv._pre_compute_svd_sparse( + decomp = ridge_gcv._decompose_covariance_sparse( sp.csr_matrix(X_diabetes_), y_diabetes) - errors4, c = ridge_gcv._errors_svd_sparse(ridge.alpha, y_diabetes, *decomp) - values4, c = ridge_gcv._values_svd_sparse(ridge.alpha, y_diabetes, *decomp) + errors4, c = ridge_gcv._errors_covariance_sparse( + ridge.alpha, y_diabetes, *decomp) + values4, c = ridge_gcv._values_covariance_sparse( + ridge.alpha, y_diabetes, *decomp) # check that efficient and SVD efficient LOO give same results assert errors == pytest.approx(errors4) @@ -981,7 +985,7 @@ def test_ridge_regression_check_arguments_validity(return_intercept, assert_allclose(out, true_coefs, rtol=0, atol=atol) -def test_errors_and_values_helper(): +def test_errors_and_values_gram(): ridgecv = _RidgeGCV() ridgecv._with_sw = False rng = check_random_state(42) @@ -991,19 +995,19 @@ def test_errors_and_values_helper(): v = rng.randn(n) Q = rng.randn(len(v), len(v)) QT_y = Q.T.dot(y) - G_diag, c = ridgecv._errors_and_values_helper(alpha, y, v, Q, QT_y) + G_diag, c = ridgecv._errors_and_values_gram(alpha, y, v, Q, QT_y) # test that helper function behaves as expected - out, c_ = ridgecv._errors(alpha, y, v, Q, QT_y) + out, c_ = ridgecv._errors_gram(alpha, y, v, Q, QT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) - out, c_ = ridgecv._values(alpha, y, v, Q, QT_y) + out, c_ = ridgecv._values_gram(alpha, y, v, Q, QT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) -def test_errors_and_values_svd_helper(): +def test_errors_and_values_covariance(): ridgecv = _RidgeGCV() ridgecv._with_sw = False rng = check_random_state(42) @@ -1013,15 +1017,15 @@ def test_errors_and_values_svd_helper(): v = rng.randn(p) U = rng.randn(n, p) UT_y = U.T.dot(y) - G_diag, c = ridgecv._errors_and_values_svd_helper_dense( + G_diag, c = ridgecv._errors_and_values_covariance_dense( alpha, y, v, U, UT_y) # test that helper function behaves as expected - out, c_ = ridgecv._errors_svd_dense(alpha, y, v, U, UT_y) + out, c_ = ridgecv._errors_covariance_dense(alpha, y, v, U, UT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) - out, c_ = ridgecv._values_svd_dense(alpha, y, v, U, UT_y) + out, c_ = ridgecv._values_covariance_dense(alpha, y, v, U, UT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) From f28db91d44c2169f4acb0e550b3813ff73e1b24a Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 17:10:27 +0200 Subject: [PATCH 049/103] compare singular vectors to +- sqrt sample weights instead of checking variance --- 
sklearn/linear_model/ridge.py | 29 ++++++++++++------------ sklearn/linear_model/tests/test_ridge.py | 4 ++++ 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index d160582b91684..72c070d0e81a1 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1136,11 +1136,10 @@ def _errors_and_values_gram(self, alpha, y, v, Q, QT_y): # corresponds to the intercept; we cancel the regularization on # this dimension. the corresponding eigenvalue is # sum(sample_weight). - if self._with_sw: - intercept_dim = np.isclose( - Q, self._normalized_sqrt_sw[:, None]).all(axis=0) - else: - intercept_dim = np.var(Q, 0) < 1.e-12 + intercept_dim = np.logical_or( + np.isclose(Q, self._normalized_sqrt_sw[:, None]).all(axis=0), + np.isclose(Q, - self._normalized_sqrt_sw[:, None]).all(axis=0) + ) w[intercept_dim] = 0 # cancel regularization for the intercept c = np.dot(Q, self._diag_dot(w, QT_y)) @@ -1216,7 +1215,10 @@ def _errors_and_values_covariance_sparse_intercept( # sum(sample_weight), e.g. n when uniform sample weights. intercept_sv = np.zeros(V.shape[0]) intercept_sv[-1] = 1 - intercept_dim = np.isclose(V, intercept_sv[:, None]).all(axis=0) + intercept_dim = np.logical_or( + np.isclose(V, intercept_sv[:, None]).all(axis=0), + np.isclose(V, - intercept_sv[:, None]).all(axis=0) + ) w = 1 / (s + alpha) w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) @@ -1281,11 +1283,10 @@ def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. """ - if self._with_sw: - intercept_dim = np.isclose( - U, self._normalized_sqrt_sw[:, None]).all(axis=0) - else: - intercept_dim = np.var(U, 0) < 1.e-12 + intercept_dim = np.logical_or( + np.isclose(U, self._normalized_sqrt_sw[:, None]).all(axis=0), + np.isclose(U, - self._normalized_sqrt_sw[:, None]).all(axis=0) + ) # detect intercept column w = ((v + alpha) ** -1) - (alpha ** -1) w[intercept_dim] = - (alpha ** -1) @@ -1370,14 +1371,14 @@ def fit(self, X, y, sample_weight=None): if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) self._sqrt_sw = np.sqrt(sample_weight) - self._normalized_sqrt_sw = self._sqrt_sw / np.linalg.norm( - self._sqrt_sw) self._weight_sum = sample_weight.sum() self._with_sw = True else: self._with_sw = False self._sqrt_sw = np.ones(X.shape[0], dtype=X.dtype) - self._weight_sum = X.shape[0] + self._weight_sum = float(X.shape[0]) + self._normalized_sqrt_sw = self._sqrt_sw / np.linalg.norm( + self._sqrt_sw) self._sqrt_sw_matrix = sparse.dia_matrix( (self._sqrt_sw, 0), shape=(X.shape[0], X.shape[0])) n_y = 1 if len(y.shape) == 1 else y.shape[1] diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index cb3108b25d19d..de8990228707c 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -410,6 +410,8 @@ def _test_ridge_loo(filter_): ridge_gcv._sqrt_sw_matrix = sp.dia_matrix( (ridge_gcv._sqrt_sw, 0), shape=(X_diabetes_.shape[0], X_diabetes_.shape[0])) + ridge_gcv._normalized_sqrt_sw = ( + ridge_gcv._sqrt_sw / np.linalg.norm(ridge_gcv._sqrt_sw)) ridge_gcv._weight_sum = X_diabetes_.shape[0] ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept) @@ -991,6 +993,7 @@ def test_errors_and_values_gram(): rng = check_random_state(42) alpha = 1. 
n = 5 + ridgecv._normalized_sqrt_sw = np.ones(n) / np.sqrt(n) y = rng.randn(n) v = rng.randn(n) Q = rng.randn(len(v), len(v)) @@ -1013,6 +1016,7 @@ def test_errors_and_values_covariance(): rng = check_random_state(42) alpha = 1. for n, p in zip((5, 10), (12, 6)): + ridgecv._normalized_sqrt_sw = np.ones(n) / np.sqrt(n) y = rng.randn(n) v = rng.randn(p) U = rng.randn(n, p) From 1267d22b89a1b8ab06f479e6098d8d644f64d989 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 17:21:08 +0200 Subject: [PATCH 050/103] docstring --- sklearn/linear_model/ridge.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 72c070d0e81a1..4f1e624599dc5 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1074,9 +1074,9 @@ def _compute_covariance(self, X, center=True): X_mean) def _sparse_multidot_diag(self, X, A, X_mean): - """ - compute the diagonal of (X - Xm).dot(A).dot((X - Xm).T) - when X is sparse, without storing X - Xm nor X.dot(A) + """ compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) + without explicitely centering X nor computing X.dot(A) + when X is sparse. """ intercept_col = self._sqrt_sw scale = self._sqrt_sw From c7a56c6018aa367e5388293bd4e7ced07eed5ce1 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 18:56:20 +0200 Subject: [PATCH 051/103] find singular vect with smallest angle instead of comparing values --- sklearn/linear_model/ridge.py | 40 ++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 4f1e624599dc5..fc2ec0583660e 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -935,6 +935,24 @@ def _check_gcv_mode(X, gcv_mode): return 'eigen' +def _find_smallest_angle(query, vectors): + """find the column of vectors that is most aligned with query. + + both query and the columns of vectors must have their l2 norm equal to 1. + + Parameters + ---------- + query : ndarray, shape (n,) + Normalized query vector. + + vectors : ndarray, shape (n, m) + Vectors to which we compare query, as columns. Must be normalized. + """ + abs_cosine = np.abs(query.dot(vectors)) + index = np.argmax(abs_cosine) + return index + + class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation @@ -1136,10 +1154,7 @@ def _errors_and_values_gram(self, alpha, y, v, Q, QT_y): # corresponds to the intercept; we cancel the regularization on # this dimension. the corresponding eigenvalue is # sum(sample_weight). - intercept_dim = np.logical_or( - np.isclose(Q, self._normalized_sqrt_sw[:, None]).all(axis=0), - np.isclose(Q, - self._normalized_sqrt_sw[:, None]).all(axis=0) - ) + intercept_dim = _find_smallest_angle(self._normalized_sqrt_sw, Q) w[intercept_dim] = 0 # cancel regularization for the intercept c = np.dot(Q, self._diag_dot(w, QT_y)) @@ -1215,10 +1230,7 @@ def _errors_and_values_covariance_sparse_intercept( # sum(sample_weight), e.g. n when uniform sample weights. 
intercept_sv = np.zeros(V.shape[0]) intercept_sv[-1] = 1 - intercept_dim = np.logical_or( - np.isclose(V, intercept_sv[:, None]).all(axis=0), - np.isclose(V, - intercept_sv[:, None]).all(axis=0) - ) + intercept_dim = _find_smallest_angle(intercept_sv, V) w = 1 / (s + alpha) w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) @@ -1283,14 +1295,12 @@ def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): """Helper function to avoid code duplication between self._errors_svd and self._values_svd. """ - intercept_dim = np.logical_or( - np.isclose(U, self._normalized_sqrt_sw[:, None]).all(axis=0), - np.isclose(U, - self._normalized_sqrt_sw[:, None]).all(axis=0) - ) - # detect intercept column w = ((v + alpha) ** -1) - (alpha ** -1) - w[intercept_dim] = - (alpha ** -1) - # cancel the regularization for the intercept + if self.fit_intercept: + # detect intercept column + intercept_dim = _find_smallest_angle(self._normalized_sqrt_sw, U) + # cancel the regularization for the intercept + w[intercept_dim] = - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y G_diag = self._decomp_diag(w, U) + (alpha ** -1) if len(y.shape) != 1: From 467b8faaa1ac6153cf3889373aade614c47fe81d Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 18:59:58 +0200 Subject: [PATCH 052/103] reduce rtol in test --- sklearn/linear_model/tests/test_ridge.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index de8990228707c..b65f974705f40 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -387,10 +387,10 @@ def test_ridge_gcv_sample_weights( gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] assert kfold.alpha_ == gcv_ridge.alpha_ - assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) - assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) - assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=5e-2) - assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) + assert_allclose(gcv_errors, kfold_errors, rtol=1e-2) + assert_allclose(gcv_errors, kfold_errors, rtol=1e-2) + assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-2) + assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-2) def _test_ridge_loo(filter_): From 6d6ea917c59e3d70c7885726ae890f5211a0c143 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Wed, 1 May 2019 19:42:02 +0200 Subject: [PATCH 053/103] increase rtol to 5e-2 in test --- sklearn/linear_model/tests/test_ridge.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index b65f974705f40..de8990228707c 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -387,10 +387,10 @@ def test_ridge_gcv_sample_weights( gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] assert kfold.alpha_ == gcv_ridge.alpha_ - assert_allclose(gcv_errors, kfold_errors, rtol=1e-2) - assert_allclose(gcv_errors, kfold_errors, rtol=1e-2) - assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-2) - assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-2) + assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) + assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) + assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=5e-2) + assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) def 
_test_ridge_loo(filter_): From b6eb75b975e3ba0e126a32ac17451ad9bb296ec7 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:01:26 +0200 Subject: [PATCH 054/103] add test for _check_gcv_mode --- sklearn/linear_model/ridge.py | 7 ++++--- sklearn/linear_model/tests/test_ridge.py | 8 ++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index fc2ec0583660e..5a87563c2c1ff 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -920,13 +920,14 @@ def classes_(self): def _check_gcv_mode(X, gcv_mode): - possible_gcv_modes = {None, 'auto', 'svd', 'eigen'} - if gcv_mode not in possible_gcv_modes: + possible_gcv_modes = [None, 'auto', 'svd', 'eigen'] + if (gcv_mode is not None and not isinstance(gcv_mode, str)) or ( + gcv_mode not in possible_gcv_modes): raise ValueError( "Unknown value for 'gcv_mode'. " "Got {} instead of one of {}" .format( gcv_mode, possible_gcv_modes)) - if gcv_mode not in {None, 'auto'}: + if gcv_mode not in [None, 'auto']: return gcv_mode # if X has more rows than columns, use decomposition of X^T.X, # otherwise X.X^T diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index de8990228707c..3138ad32b2b57 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -393,6 +393,14 @@ def test_ridge_gcv_sample_weights( assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) +def test_ridge_gcv_bad_gcv_mode(): + x, y = make_regression() + for mode in [True, 1, 5, 'bad', 'gcv', np.arange(3)]: + gcv = RidgeCV(gcv_mode=mode) + assert_raises_regex( + ValueError, "Unknown value for 'gcv_mode'", gcv.fit, x, y) + + def _test_ridge_loo(filter_): # test that can work with both dense or sparse matrices n_samples = X_diabetes.shape[0] From d4fde086cc62885bd4550e75d4201c9017306d0e Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:11:32 +0200 Subject: [PATCH 055/103] remove temp variables after fitting ridge gcv --- sklearn/linear_model/ridge.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 5a87563c2c1ff..b3ca9da3fbf2b 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1329,6 +1329,13 @@ def _values_covariance_dense(self, alpha, y, v, U, UT_y): alpha, y, v, U, UT_y) return y - (c / G_diag), c + def _remove_temp_vars(self): + for var_name in [ + '_X_offset', '_sqrt_sw', '_weight_sum', '_with_sw', + '_normalized_sqrt_sw', '_sqrt_sw_matrix']: + if hasattr(self, var_name): + delattr(self, var_name) + def fit(self, X, y, sample_weight=None): """Fit Ridge regression model @@ -1436,6 +1443,7 @@ def identity_estimator(): cv_values_shape = n_samples, n_y, len(self.alphas) self.cv_values_ = cv_values.reshape(cv_values_shape) + self._remove_temp_vars() return self From 8450c6f1f49f65db5a3bb5d16edc5dcfafcec1a3 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:20:28 +0200 Subject: [PATCH 056/103] more tests --- sklearn/linear_model/tests/test_ridge.py | 27 ++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 3138ad32b2b57..ce941cb003386 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -33,6 +33,7 @@ from sklearn.linear_model.ridge import RidgeClassifierCV from 
sklearn.linear_model.ridge import _solve_cholesky from sklearn.linear_model.ridge import _solve_cholesky_kernel +from sklearn.linear_model.ridge import _check_gcv_mode from sklearn.datasets import make_regression from sklearn.model_selection import GridSearchCV @@ -393,12 +394,34 @@ def test_ridge_gcv_sample_weights( assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) -def test_ridge_gcv_bad_gcv_mode(): - x, y = make_regression() +def test_check_gcv_mode(): + x, y = make_regression(n_samples=5, n_features=2) for mode in [True, 1, 5, 'bad', 'gcv', np.arange(3)]: gcv = RidgeCV(gcv_mode=mode) assert_raises_regex( ValueError, "Unknown value for 'gcv_mode'", gcv.fit, x, y) + assert_raises_regex( + ValueError, "Unknown value for 'gcv_mode'", _check_gcv_mode, + x, mode) + assert _check_gcv_mode(x, None) == 'svd' + assert _check_gcv_mode(x, 'auto') == 'svd' + assert _check_gcv_mode(x, 'eigen') == 'eigen' + assert _check_gcv_mode(x, 'svd') == 'svd' + + assert _check_gcv_mode(x.T, None) == 'eigen' + assert _check_gcv_mode(x.T, 'auto') == 'eigen' + assert _check_gcv_mode(x.T, 'eigen') == 'eigen' + assert _check_gcv_mode(x.T, 'svd') == 'svd' + + +def test_ridgecv_store_cv_values(): + x, y = make_regression(n_samples=10, n_features=2) + cv = RidgeCV(cv=3, store_cv_values=True) + assert_raises_regex(ValueError, 'cv!=None and store_cv_values', + cv.fit, x, y) + gcv = RidgeCV(cv=None, store_cv_values=True) + gcv.fit(x, y) + assert hasattr(gcv, cv_values_) def _test_ridge_loo(filter_): From 1dad573665195d352b1325c4c09982eaa35769ec Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:29:06 +0200 Subject: [PATCH 057/103] merge tests for gcv store_cv_values --- sklearn/linear_model/tests/test_ridge.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index ce941cb003386..ea62d20147e6b 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -414,16 +414,6 @@ def test_check_gcv_mode(): assert _check_gcv_mode(x.T, 'svd') == 'svd' -def test_ridgecv_store_cv_values(): - x, y = make_regression(n_samples=10, n_features=2) - cv = RidgeCV(cv=3, store_cv_values=True) - assert_raises_regex(ValueError, 'cv!=None and store_cv_values', - cv.fit, x, y) - gcv = RidgeCV(cv=None, store_cv_values=True) - gcv.fit(x, y) - assert hasattr(gcv, cv_values_) - - def _test_ridge_loo(filter_): # test that can work with both dense or sparse matrices n_samples = X_diabetes.shape[0] @@ -751,6 +741,10 @@ def test_ridgecv_store_cv_values(): r.fit(x, y) assert r.cv_values_.shape == (n_samples, n_targets, n_alphas) + r = RidgeCV(cv=3, store_cv_values=True) + assert_raises_regex(ValueError, 'cv!=None and store_cv_values', + r.fit, x, y) + @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_ridge_classifier_cv_store_cv_values(): From 8a5712d25090392d958690f082e63e4ba8891320 Mon Sep 17 00:00:00 2001 From: Olivier Grisel Date: Thu, 2 May 2019 11:36:21 +0200 Subject: [PATCH 058/103] Update sklearn/linear_model/tests/test_ridge.py Co-Authored-By: jeromedockes --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index ea62d20147e6b..2ceb655f2e068 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -318,7 +318,7 @@ def 
test_ridge_individual_penalties(): @pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('noise', [1., 30.]) -def test_ridge_gcv_vs_k_fold(gcv_mode, X_constructor, X_shape, fit_intercept, +def test_ridge_gcv_vs_ridge_loo_cv(gcv_mode, X_constructor, X_shape, fit_intercept, normalize, noise): n_samples, n_features = X_shape X, y = make_regression( From bb2545847f31596884945e2546dc37134ad27e96 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:38:05 +0200 Subject: [PATCH 059/103] pep8 --- sklearn/linear_model/tests/test_ridge.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 2ceb655f2e068..b19b226f6b9b3 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -318,8 +318,8 @@ def test_ridge_individual_penalties(): @pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('noise', [1., 30.]) -def test_ridge_gcv_vs_ridge_loo_cv(gcv_mode, X_constructor, X_shape, fit_intercept, - normalize, noise): +def test_ridge_gcv_vs_ridge_loo_cv( + gcv_mode, X_constructor, X_shape, fit_intercept, normalize, noise): n_samples, n_features = X_shape X, y = make_regression( n_samples=n_samples, n_features=n_features, n_targets=3, From c4303a0908f60d8df117ab166f5f84c9b7156722 Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 2 May 2019 11:42:13 +0200 Subject: [PATCH 060/103] Update sklearn/linear_model/ridge.py Co-Authored-By: jeromedockes --- sklearn/linear_model/ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index b3ca9da3fbf2b..edd53745d914a 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -937,7 +937,7 @@ def _check_gcv_mode(X, gcv_mode): def _find_smallest_angle(query, vectors): - """find the column of vectors that is most aligned with query. + """Find the column of vectors that is most aligned with the query. both query and the columns of vectors must have their l2 norm equal to 1. From b964db06e0c31ee65bb3898e49a1300a54fb836a Mon Sep 17 00:00:00 2001 From: Guillaume Lemaitre Date: Thu, 2 May 2019 11:43:56 +0200 Subject: [PATCH 061/103] Apply suggestions from code review Co-Authored-By: jeromedockes --- sklearn/linear_model/ridge.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index edd53745d914a..4621b3bc76bab 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -939,14 +939,14 @@ def _check_gcv_mode(X, gcv_mode): def _find_smallest_angle(query, vectors): """Find the column of vectors that is most aligned with the query. - both query and the columns of vectors must have their l2 norm equal to 1. + Both query and the columns of vectors must have their l2 norm equal to 1. Parameters ---------- - query : ndarray, shape (n,) + query : ndarray, shape (n_samples,) Normalized query vector. - vectors : ndarray, shape (n, m) + vectors : ndarray, shape (n_samples, n_features) Vectors to which we compare query, as columns. Must be normalized. 
""" abs_cosine = np.abs(query.dot(vectors)) @@ -1093,7 +1093,7 @@ def _compute_covariance(self, X, center=True): X_mean) def _sparse_multidot_diag(self, X, A, X_mean): - """ compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) + """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) without explicitely centering X nor computing X.dot(A) when X is sparse. """ From 49fb1400bf630eb20ed3b550bb5e928b675a263b Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:49:14 +0200 Subject: [PATCH 062/103] docstring --- sklearn/linear_model/ridge.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 4621b3bc76bab..d1eda264826b5 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1096,6 +1096,19 @@ def _sparse_multidot_diag(self, X, A, X_mean): """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) without explicitely centering X nor computing X.dot(A) when X is sparse. + + Parameters + ---------- + X : sparse matrix, shape = (n_samples, n_features) + + A : np.ndarray, shape = (n_features, n_features) + + X_mean : np.ndarray, shape = (n_features,) + + Returns + ------- + diag : np.ndarray, shape = (n_samples,) + The computed diagonal. """ intercept_col = self._sqrt_sw scale = self._sqrt_sw From e1b5d4e53ab6d85c19e0faeeed171abda93c6629 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 11:52:00 +0200 Subject: [PATCH 063/103] doc --- sklearn/linear_model/ridge.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index d1eda264826b5..800a5d4b62230 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1135,6 +1135,7 @@ def _set_intercept(self, X_offset, y_offset, X_scale): super()._set_intercept(X_offset, y_offset, X_scale) def _decompose_gram(self, X, y): + """Eigendecomposition of X.X^T, used when n_samples <= n_features""" # if X is dense it has already been centered in preprocessing center = self.fit_intercept and sparse.issparse(X) K, X_m = self._compute_gram(X, center) @@ -1187,6 +1188,7 @@ def _values_gram(self, alpha, y, v, Q, QT_y): return y - (c / G_diag), c def _decompose_covariance_sparse(self, X, y): + """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape cov = np.empty((n_features + 1, n_features + 1)) cov[:-1, :-1], self._X_offset = self._compute_covariance( From afee4638f3ecc22e9ca550da21bedb06a5a4c73d Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 12:01:18 +0200 Subject: [PATCH 064/103] _errors_and_values docstrings --- sklearn/linear_model/ridge.py | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 800a5d4b62230..96c6353642ea1 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1155,12 +1155,9 @@ def _decompose_gram(self, X, y): return v, Q, QT_y def _errors_and_values_gram(self, alpha, y, v, Q, QT_y): - """Helper function to avoid code duplication between self._errors and - self._values. + """Compute dual coefficients and diagonal of (Identity - Hat_matrix) - Notes - ----- - We don't construct matrix G, instead compute action on y & diagonal. + Used when we have a decomposition of X.X^T (n_features >= n_samples). """ w = 1. 
/ (v + alpha) if self.fit_intercept: @@ -1216,7 +1213,11 @@ def _decompose_covariance_sparse(self, X, y): def _errors_and_values_covariance_sparse_no_intercept( self, alpha, y, s, V, X): - """compute loo values and dual coef when X is sparse""" + """Compute dual coefficients and diagonal of (Identity - Hat_matrix) + + Used when we have a decomposition of X^T.X + (n_features < n_samples and X is sparse), and not fitting an intercept. + """ n_samples, n_features = X.shape w = 1 / (s + alpha) A = (V * w).dot(V.T) @@ -1231,12 +1232,11 @@ def _errors_and_values_covariance_sparse_no_intercept( def _errors_and_values_covariance_sparse_intercept( self, alpha, y, s, V, X): - """Helper function to avoid code duplication between self._errors_svd - and self._values_svd. - - compute loo values and dual coef when X is sparse and we fit an - intercept. + """Compute dual coefficients and diagonal of (Identity - Hat_matrix) + Used when we have a decomposition of X^T.X + (n_features < n_samples and X is sparse), + and we are fitting an intercept. """ n_samples, n_features = X.shape # the vector [0, 0, ..., 0, 1] @@ -1287,6 +1287,11 @@ def rmatvec(v): return (1 - hat_diag) / alpha, (y - y_hat) / alpha def _errors_and_values_covariance_sparse(self, alpha, y, s, V, X): + """Compute dual coefficients and diagonal of (Identity - Hat_matrix) + + Used when we have a decomposition of X^T.X + (n_features < n_samples and X is sparse). + """ if self.fit_intercept: return self._errors_and_values_covariance_sparse_intercept( alpha, y, s, V, X) @@ -1308,8 +1313,10 @@ def _decompose_covariance_dense(self, X, y): return v, U, UT_y def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): - """Helper function to avoid code duplication between self._errors_svd - and self._values_svd. + """Compute dual coefficients and diagonal of (Identity - Hat_matrix) + + Used when we have an SVD decomposition of X + (n_features >= n_samples and X is dense). """ w = ((v + alpha) ** -1) - (alpha ** -1) if self.fit_intercept: From a0e52844563a6290fb072391d56d2a3b23a43649 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 12:04:30 +0200 Subject: [PATCH 065/103] note on use of sample weights for computing the cv score --- sklearn/linear_model/ridge.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 96c6353642ea1..78ab3f9f5e69d 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1499,6 +1499,14 @@ def fit(self, X, y, sample_weight=None): Returns ------- self : object + + Notes + ----- + When sample_weight is provided, the selected hyperparameter may depend + on whether we use generalized cross-validation (cv=None or cv='auto') + or another form of cross-validation, because only generalized + cross-validation takes the sample weights into account when computing + the validation score. 
""" cv = self.cv if cv is None: @@ -1522,9 +1530,6 @@ def fit(self, X, y, sample_weight=None): normalize=self.normalize, solver=solver), parameters, cv=cv, scoring=self.scoring) - # note: unlike when using gcv, sample weights won't be used - # to compute the validation score so selected hyperparameter - # may differ gs.fit(X, y, sample_weight=sample_weight) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha From dc7bec10025f52d0cb956d3cdc19ca896dab56b7 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 12:10:54 +0200 Subject: [PATCH 066/103] parametrize check_gcv_mode test as suggested by @glemaitre --- sklearn/linear_model/tests/test_ridge.py | 40 +++++++++++++----------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index b19b226f6b9b3..49fa831fc9baa 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -394,24 +394,28 @@ def test_ridge_gcv_sample_weights( assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) -def test_check_gcv_mode(): - x, y = make_regression(n_samples=5, n_features=2) - for mode in [True, 1, 5, 'bad', 'gcv', np.arange(3)]: - gcv = RidgeCV(gcv_mode=mode) - assert_raises_regex( - ValueError, "Unknown value for 'gcv_mode'", gcv.fit, x, y) - assert_raises_regex( - ValueError, "Unknown value for 'gcv_mode'", _check_gcv_mode, - x, mode) - assert _check_gcv_mode(x, None) == 'svd' - assert _check_gcv_mode(x, 'auto') == 'svd' - assert _check_gcv_mode(x, 'eigen') == 'eigen' - assert _check_gcv_mode(x, 'svd') == 'svd' - - assert _check_gcv_mode(x.T, None) == 'eigen' - assert _check_gcv_mode(x.T, 'auto') == 'eigen' - assert _check_gcv_mode(x.T, 'eigen') == 'eigen' - assert _check_gcv_mode(x.T, 'svd') == 'svd' +@pytest.mark.parametrize('mode', [True, 1, 5, 'bad', 'gcv', np.arange(3)]) +def test_check_gcv_mode_error(mode): + X, y = make_regression(n_samples=5, n_features=2) + gcv = RidgeCV(gcv_mode=mode) + with pytest.raises(ValueError, match="Unknown value for 'gcv_mode'"): + gcv.fit(X, y) + _check_gcv_mode(X, mode) + + +@pytest.mark.parametrize( + 'mode, mode_samples_sup_features, mode_features_sup_samples', + [(None, 'svd', 'eigen'), + ('auto', 'svd', 'eigen'), + ('eigen', 'eigen', 'eigen'), + ('svd', 'svd', 'svd')] +) +def test_check_gcv_mode_choice(mode, mode_samples_sup_features, + mode_features_sup_samples): + X, _ = make_regression(n_samples=5, n_features=2) + + assert _check_gcv_mode(X, mode) == mode_samples_sup_features + assert _check_gcv_mode(X.T, mode) == mode_features_sup_samples def _test_ridge_loo(filter_): From 73404a9b8bfa961908ac1cc44e096f4b14d16347 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 13:17:32 +0200 Subject: [PATCH 067/103] don't store temp variables computed in ridgegcv fit in private attributes --- sklearn/linear_model/ridge.py | 162 +++++++++++------------ sklearn/linear_model/tests/test_ridge.py | 50 ++++--- 2 files changed, 104 insertions(+), 108 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 78ab3f9f5e69d..b29038362d7d6 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1016,7 +1016,7 @@ def _diag_dot(self, D, B): D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B - def _compute_gram(self, X, center=True): + def _compute_gram(self, X, sqrt_sw, center=True): """Computes the Gram matrix with possible centering. 
If ``center`` is ``True``, compute @@ -1027,6 +1027,10 @@ def _compute_gram(self, X, center=True): ---------- X : ndarray, shape (n_samples, n_features) The input uncentered data. + + sqrt_sw : ndarray, shape (n_samples,) + square roots of sample weights + center : bool, default is True Whether or not to remove the mean from ``X``. @@ -1034,7 +1038,7 @@ def _compute_gram(self, X, center=True): ------- gram : ndarray, shape (n_samples, n_samples) The Gram matrix. - X_m : ndarray, shape (n_feature,) + X_mean : ndarray, shape (n_feature,) The mean of ``X`` for each feature. """ if not center: @@ -1044,19 +1048,21 @@ def _compute_gram(self, X, center=True): return safe_sparse_dot(X, X.T, dense_output=True), X_mean # otherwise X is always sparse n_samples = X.shape[0] - X_weighted = self._sqrt_sw_matrix.dot(X) + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples)) + X_weighted = sample_weight_matrix.dot(X) X_mean, _ = mean_variance_axis(X_weighted, axis=0) - X_mean *= n_samples / self._weight_sum - X_mX = self._sqrt_sw[:, None] * safe_sparse_dot( + X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) + X_mX = sqrt_sw[:, None] * safe_sparse_dot( X_mean, X.T, dense_output=True) X_mX_m = np.empty((n_samples, n_samples), dtype=X.dtype) X_mX_m[:, :] = np.dot(X_mean, X_mean) - X_mX_m *= self._sqrt_sw - X_mX_m *= self._sqrt_sw[:, None] + X_mX_m *= sqrt_sw + X_mX_m *= sqrt_sw[:, None] return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, X_mean) - def _compute_covariance(self, X, center=True): + def _compute_covariance(self, X, sqrt_sw, center=True): """Computes centered covariance matrix. If ``center`` is ``True``, compute @@ -1068,6 +1074,10 @@ def _compute_covariance(self, X, center=True): ---------- X : ndarray, shape (n_samples, n_features) The input uncentered data. + + sqrt_sw : ndarray, shape (n_samples,) + square roots of sample weights + center : bool, default is True Whether or not to remove the mean from ``X``. @@ -1075,7 +1085,7 @@ def _compute_covariance(self, X, center=True): ------- covariance : ndarray, shape (n_features, n_features) The covariance matrix. - X_m : ndarray, shape (n_feature,) + X_mean : ndarray, shape (n_feature,) The mean of ``X`` for each feature. """ if not center: @@ -1085,14 +1095,17 @@ def _compute_covariance(self, X, center=True): return safe_sparse_dot(X.T, X, dense_output=True), X_mean # otherwise X is always sparse n_samples = X.shape[0] - X_weighted = self._sqrt_sw_matrix.dot(X) + sample_weight_matrix = sparse.dia_matrix( + (sqrt_sw, 0), shape=(n_samples, n_samples)) + X_weighted = sample_weight_matrix.dot(X) X_mean, _ = mean_variance_axis(X_weighted, axis=0) - X_mean = X_mean * n_samples / self._weight_sum + X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw) + weight_sum = sqrt_sw.dot(sqrt_sw) return (safe_sparse_dot(X.T, X, dense_output=True) - - self._weight_sum * np.outer(X_mean, X_mean), + weight_sum * np.outer(X_mean, X_mean), X_mean) - def _sparse_multidot_diag(self, X, A, X_mean): + def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): """Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T) without explicitely centering X nor computing X.dot(A) when X is sparse. @@ -1105,13 +1118,16 @@ def _sparse_multidot_diag(self, X, A, X_mean): X_mean : np.ndarray, shape = (n_features,) + sqrt_sw : np.ndarray, shape = (n_features,) + square roots of sample weights + Returns ------- diag : np.ndarray, shape = (n_samples,) The computed diagonal. 
""" - intercept_col = self._sqrt_sw - scale = self._sqrt_sw + intercept_col = sqrt_sw + scale = sqrt_sw batch_size = X.shape[1] diag = np.empty(X.shape[0]) for start in range(0, X.shape[0], batch_size): @@ -1134,27 +1150,23 @@ def _set_intercept(self, X_offset, y_offset, X_scale): X_offset += self._X_offset * X_scale super()._set_intercept(X_offset, y_offset, X_scale) - def _decompose_gram(self, X, y): + def _decompose_gram(self, X, y, sqrt_sw): """Eigendecomposition of X.X^T, used when n_samples <= n_features""" # if X is dense it has already been centered in preprocessing center = self.fit_intercept and sparse.issparse(X) - K, X_m = self._compute_gram(X, center) + K, X_m = self._compute_gram(X, sqrt_sw, center=center) if self.fit_intercept: - if self._with_sw: - # to emulate centering X with sample weights, - # ie removing the weighted average, we add a column - # containing the square roots of the sample weights. - # by centering, it is orthogonal to the other columns - K += np.outer(self._sqrt_sw, self._sqrt_sw) - else: - # with uniform sample weights we add a column of 1 - K += 1. + # to emulate centering X with sample weights, + # ie removing the weighted average, we add a column + # containing the square roots of the sample weights. + # by centering, it is orthogonal to the other columns + K += np.outer(sqrt_sw, sqrt_sw) v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) self._X_offset = X_m return v, Q, QT_y - def _errors_and_values_gram(self, alpha, y, v, Q, QT_y): + def _errors_and_values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X.X^T (n_features >= n_samples). @@ -1166,7 +1178,8 @@ def _errors_and_values_gram(self, alpha, y, v, Q, QT_y): # corresponds to the intercept; we cancel the regularization on # this dimension. the corresponding eigenvalue is # sum(sample_weight). 
- intercept_dim = _find_smallest_angle(self._normalized_sqrt_sw, Q) + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, Q) w[intercept_dim] = 0 # cancel regularization for the intercept c = np.dot(Q, self._diag_dot(w, QT_y)) @@ -1176,20 +1189,20 @@ def _errors_and_values_gram(self, alpha, y, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_gram(self, alpha, y, v, Q, QT_y): - G_diag, c = self._errors_and_values_gram(alpha, y, v, Q, QT_y) + def _errors_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): + G_diag, c = self._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) return (c / G_diag) ** 2, c - def _values_gram(self, alpha, y, v, Q, QT_y): - G_diag, c = self._errors_and_values_gram(alpha, y, v, Q, QT_y) + def _values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): + G_diag, c = self._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) return y - (c / G_diag), c - def _decompose_covariance_sparse(self, X, y): + def _decompose_covariance_sparse(self, X, y, sqrt_sw): """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape cov = np.empty((n_features + 1, n_features + 1)) cov[:-1, :-1], self._X_offset = self._compute_covariance( - X, self.fit_intercept) + X, sqrt_sw, center=self.fit_intercept) if not self.fit_intercept: cov = cov[:-1, :-1] # to emulate centering X with sample weights, @@ -1200,10 +1213,7 @@ def _decompose_covariance_sparse(self, X, y): else: cov[-1] = 0 cov[:, -1] = 0 - if self._with_sw: - cov[-1, -1] = self._weight_sum - else: - cov[-1, -1] = n_samples + cov[-1, -1] = sqrt_sw.dot(sqrt_sw) kernel_size = max(0, X.shape[1] - X.shape[0]) s, V = linalg.eigh(cov) # remove eigenvalues and vectors in the null space of X^T.X @@ -1212,7 +1222,7 @@ def _decompose_covariance_sparse(self, X, y): return s, V, X def _errors_and_values_covariance_sparse_no_intercept( - self, alpha, y, s, V, X): + self, alpha, y, sqrt_sw, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X^T.X @@ -1224,14 +1234,14 @@ def _errors_and_values_covariance_sparse_no_intercept( X_mean = self._X_offset AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) y_hat = safe_sparse_dot(X, AXy, dense_output=True) - hat_diag = self._sparse_multidot_diag(X, A, X_mean) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) if len(y.shape) != 1: # handle case where y is 2-d hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha def _errors_and_values_covariance_sparse_intercept( - self, alpha, y, s, V, X): + self, alpha, y, sqrt_sw, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X^T.X @@ -1252,25 +1262,24 @@ def _errors_and_values_covariance_sparse_intercept( A = (V * w).dot(V.T) X_mean = self._X_offset # add a column to X containing the square roots of sample weights - sw = self._sqrt_sw if self._with_sw else np.ones( - X.shape[0], dtype=X.dtype) def matvec(v): return safe_sparse_dot( X, v[:-1], dense_output=True - ) - sw * X_mean.dot(v[:-1]) + v[-1] * sw + ) - sqrt_sw * X_mean.dot(v[:-1]) + v[-1] * sqrt_sw def matmat(v): - return safe_sparse_dot( - X, v[:-1], dense_output=True - ) - sw[:, None] * X_mean.dot(v[:-1]) + v[-1] * sw[:, None] + return ( + safe_sparse_dot(X, v[:-1], dense_output=True) - + sqrt_sw[:, None] * X_mean.dot(v[:-1]) + v[-1] * + sqrt_sw[:, None]) def rmatvec(v): v = v.ravel() res = np.empty(n_features + 1) res[:-1] = 
safe_sparse_dot( - X.T, v, dense_output=True) - X_mean * sw.dot(v) - res[-1] = np.dot(v, sw) + X.T, v, dense_output=True) - X_mean * sqrt_sw.dot(v) + res[-1] = np.dot(v, sqrt_sw) return res Xop = sparse.linalg.LinearOperator( @@ -1279,14 +1288,14 @@ def rmatvec(v): ) AXy = A.dot(Xop.adjoint().dot(y)) y_hat = Xop.dot(AXy) - hat_diag = self._sparse_multidot_diag(X, A, X_mean) + hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) # return (1 - hat_diag), (y - y_hat) if len(y.shape) != 1: # handle case where y is 2-d hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_covariance_sparse(self, alpha, y, s, V, X): + def _errors_and_values_covariance_sparse(self, alpha, y, sqrt_sw, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X^T.X @@ -1294,16 +1303,13 @@ def _errors_and_values_covariance_sparse(self, alpha, y, s, V, X): """ if self.fit_intercept: return self._errors_and_values_covariance_sparse_intercept( - alpha, y, s, V, X) + alpha, y, sqrt_sw, s, V, X) return self._errors_and_values_covariance_sparse_no_intercept( - alpha, y, s, V, X) + alpha, y, sqrt_sw, s, V, X) - def _decompose_covariance_dense(self, X, y): + def _decompose_covariance_dense(self, X, y, sqrt_sw): if self.fit_intercept: - if self._with_sw: - intercept = self._sqrt_sw[:, None] - else: - intercept = np.ones((X.shape[0], 1)) + intercept = sqrt_sw[:, None] X = np.hstack((X, intercept)) # to emulate fit_intercept=True situation, add a column on ones # Note that by centering, the other columns are orthogonal to that one @@ -1312,7 +1318,8 @@ def _decompose_covariance_dense(self, X, y): UT_y = np.dot(U.T, y) return v, U, UT_y - def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): + def _errors_and_values_covariance_dense( + self, alpha, y, sqrt_sw, v, U, UT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have an SVD decomposition of X @@ -1321,7 +1328,8 @@ def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): w = ((v + alpha) ** -1) - (alpha ** -1) if self.fit_intercept: # detect intercept column - intercept_dim = _find_smallest_angle(self._normalized_sqrt_sw, U) + normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) + intercept_dim = _find_smallest_angle(normalized_sw, U) # cancel the regularization for the intercept w[intercept_dim] = - (alpha ** -1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y @@ -1331,24 +1339,24 @@ def _errors_and_values_covariance_dense(self, alpha, y, v, U, UT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_covariance_sparse(self, alpha, y, v, U, UT_y): + def _errors_covariance_sparse(self, alpha, y, sqrt_sw, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_sparse( - alpha, y, v, U, UT_y) + alpha, y, sqrt_sw, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_covariance_sparse(self, alpha, y, v, U, UT_y): + def _values_covariance_sparse(self, alpha, y, sqrt_sw, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_sparse( - alpha, y, v, U, UT_y) + alpha, y, sqrt_sw, v, U, UT_y) return y - (c / G_diag), c - def _errors_covariance_dense(self, alpha, y, v, U, UT_y): + def _errors_covariance_dense(self, alpha, y, sqrt_sw, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_dense( - alpha, y, v, U, UT_y) + alpha, y, sqrt_sw, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_covariance_dense(self, alpha, y, v, U, UT_y): + def 
_values_covariance_dense(self, alpha, y, sqrt_sw, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_dense( - alpha, y, v, U, UT_y) + alpha, y, sqrt_sw, v, U, UT_y) return y - (c / G_diag), c def _remove_temp_vars(self): @@ -1410,17 +1418,9 @@ def fit(self, X, y, sample_weight=None): if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) - self._sqrt_sw = np.sqrt(sample_weight) - self._weight_sum = sample_weight.sum() - self._with_sw = True + sqrt_sw = np.sqrt(sample_weight) else: - self._with_sw = False - self._sqrt_sw = np.ones(X.shape[0], dtype=X.dtype) - self._weight_sum = float(X.shape[0]) - self._normalized_sqrt_sw = self._sqrt_sw / np.linalg.norm( - self._sqrt_sw) - self._sqrt_sw_matrix = sparse.dia_matrix( - (self._sqrt_sw, 0), shape=(X.shape[0], X.shape[0])) + sqrt_sw = np.ones(X.shape[0], dtype=X.dtype) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] @@ -1428,12 +1428,12 @@ def fit(self, X, y, sample_weight=None): scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None - decomposition = _decompose(X, y) + decomposition = _decompose(X, y, sqrt_sw) for i, alpha in enumerate(self.alphas): if error: - out, c = _errors(float(alpha), y, *decomposition) + out, c = _errors(float(alpha), y, sqrt_sw, *decomposition) else: - out, c = _values(float(alpha), y, *decomposition) + out, c = _values(float(alpha), y, sqrt_sw, *decomposition) cv_values[:, i] = out.ravel() C.append(c) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 49fa831fc9baa..2fffa514eb946 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -430,22 +430,15 @@ def _test_ridge_loo(filter_): else: X_diabetes_ = X_diabetes ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) - ridge_gcv._with_sw = False - ridge_gcv._sqrt_sw = np.ones(X_diabetes_.shape[0]) - ridge_gcv._sqrt_sw_matrix = sp.dia_matrix( - (ridge_gcv._sqrt_sw, 0), - shape=(X_diabetes_.shape[0], X_diabetes_.shape[0])) - ridge_gcv._normalized_sqrt_sw = ( - ridge_gcv._sqrt_sw / np.linalg.norm(ridge_gcv._sqrt_sw)) - ridge_gcv._weight_sum = X_diabetes_.shape[0] + sqrt_sw = np.ones(X_diabetes_.shape[0]) ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept) # because fit_intercept is applied # generalized cross-validation (efficient leave-one-out) - decomp = ridge_gcv._decompose_gram(X_diabetes_, y_diabetes) - errors, c = ridge_gcv._errors_gram(1.0, y_diabetes, *decomp) - values, c = ridge_gcv._values_gram(1.0, y_diabetes, *decomp) + decomp = ridge_gcv._decompose_gram(X_diabetes_, y_diabetes, sqrt_sw) + errors, c = ridge_gcv._errors_gram(1.0, y_diabetes, sqrt_sw, *decomp) + values, c = ridge_gcv._values_gram(1.0, y_diabetes, sqrt_sw, *decomp) # brute-force leave-one-out: remove one example at a time errors2 = [] @@ -466,11 +459,12 @@ def _test_ridge_loo(filter_): # generalized cross-validation (efficient leave-one-out, # SVD variation) - decomp = ridge_gcv._decompose_covariance_dense(X_diabetes_, y_diabetes) + decomp = ridge_gcv._decompose_covariance_dense( + X_diabetes_, y_diabetes, sqrt_sw) errors3, c = ridge_gcv._errors_covariance_dense( - ridge.alpha, y_diabetes, *decomp) + ridge.alpha, y_diabetes, sqrt_sw, *decomp) values3, c = ridge_gcv._values_covariance_dense( - ridge.alpha, y_diabetes, *decomp) + ridge.alpha, y_diabetes, sqrt_sw, *decomp) # check that efficient and SVD efficient LOO give same results assert errors == pytest.approx(errors3) @@ 
-479,18 +473,18 @@ def _test_ridge_loo(filter_): # generalized cross-validation (efficient leave-one-out, # SVD variation) decomp = ridge_gcv._decompose_covariance_sparse( - sp.csr_matrix(X_diabetes_), y_diabetes) + sp.csr_matrix(X_diabetes_), y_diabetes, sqrt_sw) errors4, c = ridge_gcv._errors_covariance_sparse( - ridge.alpha, y_diabetes, *decomp) + ridge.alpha, y_diabetes, sqrt_sw, *decomp) values4, c = ridge_gcv._values_covariance_sparse( - ridge.alpha, y_diabetes, *decomp) + ridge.alpha, y_diabetes, sqrt_sw, *decomp) # check that efficient and SVD efficient LOO give same results assert errors == pytest.approx(errors4) assert values == pytest.approx(values4) # check best alpha - ridge_gcv.fit(filter_(X_diabetes), y_diabetes) + ridge_gcv.fit(filter_(X_diabetes), y_diabetes, sqrt_sw) alpha_ = ridge_gcv.alpha_ ret.append(alpha_) @@ -498,7 +492,7 @@ def _test_ridge_loo(filter_): f = ignore_warnings scoring = make_scorer(mean_squared_error, greater_is_better=False) ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring) - f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes) + f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes, sqrt_sw) assert ridge_gcv2.alpha_ == pytest.approx(alpha_) # check that we get same best alpha with custom score_func @@ -1022,19 +1016,19 @@ def test_errors_and_values_gram(): rng = check_random_state(42) alpha = 1. n = 5 - ridgecv._normalized_sqrt_sw = np.ones(n) / np.sqrt(n) + sqrt_sw = np.ones(n) y = rng.randn(n) v = rng.randn(n) Q = rng.randn(len(v), len(v)) QT_y = Q.T.dot(y) - G_diag, c = ridgecv._errors_and_values_gram(alpha, y, v, Q, QT_y) + G_diag, c = ridgecv._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) # test that helper function behaves as expected - out, c_ = ridgecv._errors_gram(alpha, y, v, Q, QT_y) + out, c_ = ridgecv._errors_gram(alpha, y, sqrt_sw, v, Q, QT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) - out, c_ = ridgecv._values_gram(alpha, y, v, Q, QT_y) + out, c_ = ridgecv._values_gram(alpha, y, sqrt_sw, v, Q, QT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) @@ -1045,20 +1039,22 @@ def test_errors_and_values_covariance(): rng = check_random_state(42) alpha = 1. 
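
(A standalone sketch, not part of the patch: the names below are made up for illustration and assume unit sample weights and no intercept. It checks the leave-one-out identity that the gram helpers compute: with G = (K + alpha*I)^-1, the LOO residual for sample i equals (G.y)_i / G_ii, which is exactly the c / G_diag ratio used by _errors_gram and _values_gram.)

import numpy as np

rng = np.random.RandomState(0)
X_demo, y_demo, alpha_demo = rng.randn(6, 9), rng.randn(6), 1.0
K = X_demo.dot(X_demo.T)                       # linear kernel, shape (n_samples, n_samples)
G = np.linalg.inv(K + alpha_demo * np.eye(6))
shortcut = G.dot(y_demo) / np.diag(G)          # c / G_diag, as in the gram helpers

brute_force = []
for i in range(6):
    keep = np.delete(np.arange(6), i)
    dual = np.linalg.solve(K[np.ix_(keep, keep)] + alpha_demo * np.eye(len(keep)),
                           y_demo[keep])
    brute_force.append(y_demo[i] - K[i, keep].dot(dual))  # true held-out residual

np.testing.assert_allclose(shortcut, brute_force, rtol=1e-8)
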
for n, p in zip((5, 10), (12, 6)): - ridgecv._normalized_sqrt_sw = np.ones(n) / np.sqrt(n) + sqrt_sw = np.ones(n) y = rng.randn(n) v = rng.randn(p) U = rng.randn(n, p) UT_y = U.T.dot(y) G_diag, c = ridgecv._errors_and_values_covariance_dense( - alpha, y, v, U, UT_y) + alpha, y, sqrt_sw, v, U, UT_y) # test that helper function behaves as expected - out, c_ = ridgecv._errors_covariance_dense(alpha, y, v, U, UT_y) + out, c_ = ridgecv._errors_covariance_dense( + alpha, y, sqrt_sw, v, U, UT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) - out, c_ = ridgecv._values_covariance_dense(alpha, y, v, U, UT_y) + out, c_ = ridgecv._values_covariance_dense( + alpha, y, sqrt_sw, v, U, UT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) From d7475ca09f7f6ad3993f6ad0dbf40c7530423e21 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 13:47:18 +0200 Subject: [PATCH 068/103] don't store self._X_offset --- sklearn/linear_model/ridge.py | 65 ++++++++++-------------- sklearn/linear_model/tests/test_ridge.py | 11 ++-- 2 files changed, 33 insertions(+), 43 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index b29038362d7d6..9e40330068024 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1144,17 +1144,11 @@ def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) return diag - def _set_intercept(self, X_offset, y_offset, X_scale): - # add the mean of X which was computed separately if X is sparse - if getattr(self, '_X_offset', None) is not None: - X_offset += self._X_offset * X_scale - super()._set_intercept(X_offset, y_offset, X_scale) - def _decompose_gram(self, X, y, sqrt_sw): """Eigendecomposition of X.X^T, used when n_samples <= n_features""" # if X is dense it has already been centered in preprocessing center = self.fit_intercept and sparse.issparse(X) - K, X_m = self._compute_gram(X, sqrt_sw, center=center) + K, X_mean = self._compute_gram(X, sqrt_sw, center=center) if self.fit_intercept: # to emulate centering X with sample weights, # ie removing the weighted average, we add a column @@ -1163,8 +1157,7 @@ def _decompose_gram(self, X, y, sqrt_sw): K += np.outer(sqrt_sw, sqrt_sw) v, Q = linalg.eigh(K) QT_y = np.dot(Q.T, y) - self._X_offset = X_m - return v, Q, QT_y + return X_mean, v, Q, QT_y def _errors_and_values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1189,11 +1182,11 @@ def _errors_and_values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): + def _errors_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): G_diag, c = self._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) return (c / G_diag) ** 2, c - def _values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): + def _values_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): G_diag, c = self._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) return y - (c / G_diag), c @@ -1201,7 +1194,7 @@ def _decompose_covariance_sparse(self, X, y, sqrt_sw): """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape cov = np.empty((n_features + 1, n_features + 1)) - cov[:-1, :-1], self._X_offset = self._compute_covariance( + cov[:-1, :-1], X_mean = self._compute_covariance( X, sqrt_sw, center=self.fit_intercept) if not 
self.fit_intercept: cov = cov[:-1, :-1] @@ -1219,10 +1212,10 @@ def _decompose_covariance_sparse(self, X, y, sqrt_sw): # remove eigenvalues and vectors in the null space of X^T.X s = s[kernel_size:] V = V[:, kernel_size:] - return s, V, X + return X_mean, s, V, X def _errors_and_values_covariance_sparse_no_intercept( - self, alpha, y, sqrt_sw, s, V, X): + self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X^T.X @@ -1231,7 +1224,6 @@ def _errors_and_values_covariance_sparse_no_intercept( n_samples, n_features = X.shape w = 1 / (s + alpha) A = (V * w).dot(V.T) - X_mean = self._X_offset AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) y_hat = safe_sparse_dot(X, AXy, dense_output=True) hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) @@ -1241,7 +1233,7 @@ def _errors_and_values_covariance_sparse_no_intercept( return (1 - hat_diag) / alpha, (y - y_hat) / alpha def _errors_and_values_covariance_sparse_intercept( - self, alpha, y, sqrt_sw, s, V, X): + self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X^T.X @@ -1260,7 +1252,6 @@ def _errors_and_values_covariance_sparse_intercept( w = 1 / (s + alpha) w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) - X_mean = self._X_offset # add a column to X containing the square roots of sample weights def matvec(v): @@ -1295,7 +1286,8 @@ def rmatvec(v): hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_covariance_sparse(self, alpha, y, sqrt_sw, s, V, X): + def _errors_and_values_covariance_sparse( + self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X^T.X @@ -1303,11 +1295,13 @@ def _errors_and_values_covariance_sparse(self, alpha, y, sqrt_sw, s, V, X): """ if self.fit_intercept: return self._errors_and_values_covariance_sparse_intercept( - alpha, y, sqrt_sw, s, V, X) + alpha, y, sqrt_sw, X_mean, s, V, X) return self._errors_and_values_covariance_sparse_no_intercept( - alpha, y, sqrt_sw, s, V, X) + alpha, y, sqrt_sw, X_mean, s, V, X) def _decompose_covariance_dense(self, X, y, sqrt_sw): + # X already centered + X_mean = np.zeros(X.shape[1], dtype=X.dtype) if self.fit_intercept: intercept = sqrt_sw[:, None] X = np.hstack((X, intercept)) @@ -1316,7 +1310,7 @@ def _decompose_covariance_dense(self, X, y, sqrt_sw): U, s, _ = linalg.svd(X, full_matrices=0) v = s ** 2 UT_y = np.dot(U.T, y) - return v, U, UT_y + return X_mean, v, U, UT_y def _errors_and_values_covariance_dense( self, alpha, y, sqrt_sw, v, U, UT_y): @@ -1339,33 +1333,26 @@ def _errors_and_values_covariance_dense( G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_covariance_sparse(self, alpha, y, sqrt_sw, v, U, UT_y): + def _errors_covariance_sparse(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_sparse( - alpha, y, sqrt_sw, v, U, UT_y) + alpha, y, sqrt_sw, X_mean, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_covariance_sparse(self, alpha, y, sqrt_sw, v, U, UT_y): + def _values_covariance_sparse(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_sparse( - alpha, y, sqrt_sw, v, U, UT_y) + alpha, y, sqrt_sw, X_mean, v, U, UT_y) return y - (c / G_diag), c - def _errors_covariance_dense(self, alpha, y, sqrt_sw, v, U, 
UT_y): + def _errors_covariance_dense(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_dense( alpha, y, sqrt_sw, v, U, UT_y) return (c / G_diag) ** 2, c - def _values_covariance_dense(self, alpha, y, sqrt_sw, v, U, UT_y): + def _values_covariance_dense(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): G_diag, c = self._errors_and_values_covariance_dense( alpha, y, sqrt_sw, v, U, UT_y) return y - (c / G_diag), c - def _remove_temp_vars(self): - for var_name in [ - '_X_offset', '_sqrt_sw', '_weight_sum', '_with_sw', - '_normalized_sqrt_sw', '_sqrt_sw_matrix']: - if hasattr(self, var_name): - delattr(self, var_name) - def fit(self, X, y, sample_weight=None): """Fit Ridge regression model @@ -1428,12 +1415,14 @@ def fit(self, X, y, sample_weight=None): scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None - decomposition = _decompose(X, y, sqrt_sw) + X_mean, *decomposition = _decompose(X, y, sqrt_sw) for i, alpha in enumerate(self.alphas): if error: - out, c = _errors(float(alpha), y, sqrt_sw, *decomposition) + out, c = _errors( + float(alpha), y, sqrt_sw, X_mean, *decomposition) else: - out, c = _values(float(alpha), y, sqrt_sw, *decomposition) + out, c = _values( + float(alpha), y, sqrt_sw, X_mean, *decomposition) cv_values[:, i] = out.ravel() C.append(c) @@ -1456,6 +1445,7 @@ def identity_estimator(): self.dual_coef_ = C[best] self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) + X_offset += X_mean * X_scale self._set_intercept(X_offset, y_offset, X_scale) if self.store_cv_values: @@ -1465,7 +1455,6 @@ def identity_estimator(): cv_values_shape = n_samples, n_y, len(self.alphas) self.cv_values_ = cv_values.reshape(cv_values_shape) - self._remove_temp_vars() return self diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 2fffa514eb946..539bea443124f 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -1017,6 +1017,7 @@ def test_errors_and_values_gram(): alpha = 1. n = 5 sqrt_sw = np.ones(n) + X_m = None y = rng.randn(n) v = rng.randn(n) Q = rng.randn(len(v), len(v)) @@ -1024,18 +1025,17 @@ def test_errors_and_values_gram(): G_diag, c = ridgecv._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) # test that helper function behaves as expected - out, c_ = ridgecv._errors_gram(alpha, y, sqrt_sw, v, Q, QT_y) + out, c_ = ridgecv._errors_gram(alpha, y, sqrt_sw, X_m, v, Q, QT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) - out, c_ = ridgecv._values_gram(alpha, y, sqrt_sw, v, Q, QT_y) + out, c_ = ridgecv._values_gram(alpha, y, sqrt_sw, X_m, v, Q, QT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) def test_errors_and_values_covariance(): ridgecv = _RidgeGCV() - ridgecv._with_sw = False rng = check_random_state(42) alpha = 1. 
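
(Another standalone sketch, not from the patch, of the idea behind folding X_mean back into X_offset before _set_intercept, assuming normalize=False so the scale factors are all ones: a ridge fit without intercept on centered data gives the same coefficients as a fit with an intercept, and the intercept is then recovered as y_mean - X_mean.dot(coef).)

import numpy as np
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X = rng.randn(20, 4) + 10.                     # features with a large constant offset
y = X.dot(rng.randn(4)) + 5. + rng.randn(20)

ref = Ridge(alpha=1., fit_intercept=True).fit(X, y)

X_mean, y_mean = X.mean(axis=0), y.mean()
centered = Ridge(alpha=1., fit_intercept=False).fit(X - X_mean, y - y_mean)
np.testing.assert_allclose(centered.coef_, ref.coef_)
np.testing.assert_allclose(y_mean - X_mean.dot(centered.coef_), ref.intercept_)
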
for n, p in zip((5, 10), (12, 6)): @@ -1044,17 +1044,18 @@ def test_errors_and_values_covariance(): v = rng.randn(p) U = rng.randn(n, p) UT_y = U.T.dot(y) + X_m = np.zeros(p) G_diag, c = ridgecv._errors_and_values_covariance_dense( alpha, y, sqrt_sw, v, U, UT_y) # test that helper function behaves as expected out, c_ = ridgecv._errors_covariance_dense( - alpha, y, sqrt_sw, v, U, UT_y) + alpha, y, sqrt_sw, X_m, v, U, UT_y) np.testing.assert_array_equal(out, (c / G_diag) ** 2) np.testing.assert_array_equal(c, c) out, c_ = ridgecv._values_covariance_dense( - alpha, y, sqrt_sw, v, U, UT_y) + alpha, y, sqrt_sw, X_m, v, U, UT_y) np.testing.assert_array_equal(out, y - (c / G_diag)) np.testing.assert_array_equal(c_, c) From 6beb21a01cf5baa1505a759e52f8e39550740b9b Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 15:02:25 +0200 Subject: [PATCH 069/103] improve test_check_gcv_mode_error --- sklearn/linear_model/tests/test_ridge.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 539bea443124f..807f9764a3c3e 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -400,6 +400,7 @@ def test_check_gcv_mode_error(mode): gcv = RidgeCV(gcv_mode=mode) with pytest.raises(ValueError, match="Unknown value for 'gcv_mode'"): gcv.fit(X, y) + with pytest.raises(ValueError, match="Unknown value for 'gcv_mode'"): _check_gcv_mode(X, mode) From 4971b5d6d39cd9ddc04bc4c5ce21877713744fdb Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 15:23:20 +0200 Subject: [PATCH 070/103] check_scoring earlier in _RidgeGCV fit --- sklearn/linear_model/ridge.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 9e40330068024..34ae38d30c4e9 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1408,13 +1408,13 @@ def fit(self, X, y, sample_weight=None): sqrt_sw = np.sqrt(sample_weight) else: sqrt_sw = np.ones(X.shape[0], dtype=X.dtype) - n_y = 1 if len(y.shape) == 1 else y.shape[1] - cv_values = np.zeros((n_samples * n_y, len(self.alphas))) - C = [] scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None + n_y = 1 if len(y.shape) == 1 else y.shape[1] + cv_values = np.zeros((n_samples * n_y, len(self.alphas))) + C = [] X_mean, *decomposition = _decompose(X, y, sqrt_sw) for i, alpha in enumerate(self.alphas): if error: From b0ddc8d0ffd33934920fb9e5298c03c77f131f90 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 16:08:08 +0200 Subject: [PATCH 071/103] fix case where y.shape == (n_samples, 1) --- sklearn/linear_model/ridge.py | 1 + sklearn/linear_model/tests/test_ridge.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 34ae38d30c4e9..720be0c7090a6 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1255,6 +1255,7 @@ def _errors_and_values_covariance_sparse_intercept( # add a column to X containing the square roots of sample weights def matvec(v): + v = v.ravel() return safe_sparse_dot( X, v[:-1], dense_output=True ) - sqrt_sw * X_mean.dot(v[:-1]) + v[-1] * sqrt_sw diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 807f9764a3c3e..7f94c9940f52c 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ 
b/sklearn/linear_model/tests/test_ridge.py @@ -315,16 +315,20 @@ def test_ridge_individual_penalties(): @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)]) +@pytest.mark.parametrize('y_shape', [(11,), (11, 1), (11, 3)]) @pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize('normalize', [True, False]) @pytest.mark.parametrize('noise', [1., 30.]) def test_ridge_gcv_vs_ridge_loo_cv( - gcv_mode, X_constructor, X_shape, fit_intercept, normalize, noise): + gcv_mode, X_constructor, X_shape, y_shape, + fit_intercept, normalize, noise): n_samples, n_features = X_shape + n_targets = y_shape[-1] if len(y_shape) == 2 else 1 X, y = make_regression( - n_samples=n_samples, n_features=n_features, n_targets=3, + n_samples=n_samples, n_features=n_features, n_targets=n_targets, random_state=0, shuffle=False, noise=noise, n_informative=5 ) + y = y.reshape(y_shape) X += 30 * np.random.RandomState(0).randn(X.shape[1]) alphas = [1e-3, .1, 1., 10., 1e3] From 8c482787d2276966248dbc62c69eadd72561a366 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 16:15:47 +0200 Subject: [PATCH 072/103] more y shapes with sample weights --- sklearn/linear_model/tests/test_ridge.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 7f94c9940f52c..971066265430e 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -352,14 +352,17 @@ def test_ridge_gcv_vs_ridge_loo_cv( @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize('n_features', [11, 69]) +@pytest.mark.parametrize('y_shape', [(59,), (59, 1), (59, 3)]) @pytest.mark.parametrize('noise', [1., 30.]) def test_ridge_gcv_sample_weights( - gcv_mode, X_constructor, fit_intercept, n_features, noise): + gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise): alphas = [1e-3, .1, 1., 10., 1e3] rng = np.random.RandomState(0) + n_targets = y_shape[-1] if len(y_shape) == 2 else 1 x, y = datasets.make_regression( - n_samples=59, n_features=n_features, n_targets=4, + n_samples=59, n_features=n_features, n_targets=n_targets, random_state=0, shuffle=False, noise=noise) + y = y.reshape(y_shape) x += 30 * rng.randn(x.shape[1]) sample_weight = 3 * rng.randn(len(x)) sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) @@ -389,7 +392,10 @@ def test_ridge_gcv_sample_weights( alphas=alphas, store_cv_values=True, gcv_mode=gcv_mode, fit_intercept=fit_intercept) gcv_ridge.fit(x_gcv, y, sample_weight=sample_weight) - gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] + if len(y_shape) == 2: + gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] + else: + gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)] assert kfold.alpha_ == gcv_ridge.alpha_ assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) From b0a82fd3df077ece52ae8ce40cfc54ac2b3ba87a Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 17:54:24 +0200 Subject: [PATCH 073/103] reduce number of combinations in ridgegcv tests + smaller test data --- sklearn/linear_model/tests/test_ridge.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py 
b/sklearn/linear_model/tests/test_ridge.py index 971066265430e..3ee9fd57d0e93 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -315,10 +315,14 @@ def test_ridge_individual_penalties(): @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)]) -@pytest.mark.parametrize('y_shape', [(11,), (11, 1), (11, 3)]) -@pytest.mark.parametrize('fit_intercept', [True, False]) -@pytest.mark.parametrize('normalize', [True, False]) -@pytest.mark.parametrize('noise', [1., 30.]) +@pytest.mark.parametrize( + 'y_shape, fit_intercept, normalize, noise', + [ + ((11,), True, True, 1.), + ((11, 1), True, False, 20.), + ((11, 3), False, False, 30.), + ] +) def test_ridge_gcv_vs_ridge_loo_cv( gcv_mode, X_constructor, X_shape, y_shape, fit_intercept, normalize, noise): @@ -350,17 +354,18 @@ def test_ridge_gcv_vs_ridge_loo_cv( @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) -@pytest.mark.parametrize('fit_intercept', [True, False]) -@pytest.mark.parametrize('n_features', [11, 69]) -@pytest.mark.parametrize('y_shape', [(59,), (59, 1), (59, 3)]) -@pytest.mark.parametrize('noise', [1., 30.]) +@pytest.mark.parametrize('n_features', [8, 20]) +@pytest.mark.parametrize('y_shape, fit_intercept, noise', + [((11,), True, 1.), + ((11, 1), True, 20.), + ((11, 3), False, 30.)]) def test_ridge_gcv_sample_weights( gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise): alphas = [1e-3, .1, 1., 10., 1e3] rng = np.random.RandomState(0) n_targets = y_shape[-1] if len(y_shape) == 2 else 1 x, y = datasets.make_regression( - n_samples=59, n_features=n_features, n_targets=n_targets, + n_samples=11, n_features=n_features, n_targets=n_targets, random_state=0, shuffle=False, noise=noise) y = y.reshape(y_shape) x += 30 * rng.randn(x.shape[1]) From 659cb14fba1d745391160ef26c14cecc24c6780a Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 18:01:15 +0200 Subject: [PATCH 074/103] pep8 --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 3ee9fd57d0e93..4af3729732679 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -358,7 +358,7 @@ def test_ridge_gcv_vs_ridge_loo_cv( @pytest.mark.parametrize('y_shape, fit_intercept, noise', [((11,), True, 1.), ((11, 1), True, 20.), - ((11, 3), False, 30.)]) + ((11, 3), False, 30.)]) def test_ridge_gcv_sample_weights( gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise): alphas = [1e-3, .1, 1., 10., 1e3] From 44616c84b14ffcaaba7950fac1c1720abd3b8de5 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 18:05:51 +0200 Subject: [PATCH 075/103] better parameter name --- sklearn/linear_model/tests/test_ridge.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 4af3729732679..8560c7f8426ed 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -420,18 +420,18 @@ def test_check_gcv_mode_error(mode): @pytest.mark.parametrize( - 'mode, mode_samples_sup_features, mode_features_sup_samples', + 'mode, mode_n_greater_than_p, mode_p_greater_than_n', [(None, 
'svd', 'eigen'), ('auto', 'svd', 'eigen'), ('eigen', 'eigen', 'eigen'), ('svd', 'svd', 'svd')] ) -def test_check_gcv_mode_choice(mode, mode_samples_sup_features, - mode_features_sup_samples): +def test_check_gcv_mode_choice(mode, mode_n_greater_than_p, + mode_p_greater_than_n): X, _ = make_regression(n_samples=5, n_features=2) - assert _check_gcv_mode(X, mode) == mode_samples_sup_features - assert _check_gcv_mode(X.T, mode) == mode_features_sup_samples + assert _check_gcv_mode(X, mode) == mode_n_greater_than_p + assert _check_gcv_mode(X.T, mode) == mode_p_greater_than_n def _test_ridge_loo(filter_): From bdd1f8e43604068a2f7c815c72f3a187746c9a74 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 18:18:05 +0200 Subject: [PATCH 076/103] changes suggested by @ogrisel --- sklearn/linear_model/ridge.py | 15 ++++++++------- sklearn/linear_model/tests/test_ridge.py | 6 ++++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 720be0c7090a6..e105a5498304c 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1567,12 +1567,15 @@ class RidgeCV(_BaseRidgeCV, RegressorMixin): A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. + If None, the negative mean squared error if cv is 'auto' or None + (i.e. when using generalized cross-validation), and r2 score otherwise. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the efficient Leave-One-Out cross-validation + (also known as Generalized Cross-Validation). - integer, to specify the number of folds. - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. @@ -1588,15 +1591,13 @@ class RidgeCV(_BaseRidgeCV, RegressorMixin): Flag indicating which strategy to use when performing Generalized Cross-Validation. Options are:: - 'auto' : use svd if n_samples > n_features or when X is a sparse - matrix, otherwise use eigen - 'svd' : force computation via singular value decomposition of X - (does not work for sparse matrices) - 'eigen' : force computation via eigendecomposition of X^T X + 'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen' + 'svd' : force use of singular value decomposition of X when X is + dense, eigenvalue decomposition of X^T.X when X is sparse. + 'eigen' : force computation via eigendecomposition of X.X^T The 'auto' mode is the default and is intended to pick the cheaper - option of the two depending upon the shape and format of the training - data. + option of the two depending on the shape of the training data. 
store_cv_values : boolean, default=False Flag indicating if the cross-validation values corresponding to diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 8560c7f8426ed..0998a16beabaa 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -419,6 +419,7 @@ def test_check_gcv_mode_error(mode): _check_gcv_mode(X, mode) +@pytest.mark.parametrize("sparse", [True, False]) @pytest.mark.parametrize( 'mode, mode_n_greater_than_p, mode_p_greater_than_n', [(None, 'svd', 'eigen'), @@ -426,10 +427,11 @@ def test_check_gcv_mode_error(mode): ('eigen', 'eigen', 'eigen'), ('svd', 'svd', 'svd')] ) -def test_check_gcv_mode_choice(mode, mode_n_greater_than_p, +def test_check_gcv_mode_choice(sparse, mode, mode_n_greater_than_p, mode_p_greater_than_n): X, _ = make_regression(n_samples=5, n_features=2) - + if sparse: + X = sp.csr_matrix(X) assert _check_gcv_mode(X, mode) == mode_n_greater_than_p assert _check_gcv_mode(X.T, mode) == mode_p_greater_than_n From 3a4e0c7f4217e7dcccac308ac4519337b941ec23 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 18:44:01 +0200 Subject: [PATCH 077/103] kernel_size -> nullspace_dim --- sklearn/linear_model/ridge.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index e105a5498304c..4c0bbd714e068 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1207,11 +1207,11 @@ def _decompose_covariance_sparse(self, X, y, sqrt_sw): cov[-1] = 0 cov[:, -1] = 0 cov[-1, -1] = sqrt_sw.dot(sqrt_sw) - kernel_size = max(0, X.shape[1] - X.shape[0]) + nullspace_dim = max(0, X.shape[1] - X.shape[0]) s, V = linalg.eigh(cov) # remove eigenvalues and vectors in the null space of X^T.X - s = s[kernel_size:] - V = V[:, kernel_size:] + s = s[nullspace_dim:] + V = V[:, nullspace_dim:] return X_mean, s, V, X def _errors_and_values_covariance_sparse_no_intercept( From 05c56f9d70bc3ff875ee8e7183b1aff1112012d1 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 18:58:57 +0200 Subject: [PATCH 078/103] specify why we ignore DeprecationWarning in ridgecv test --- sklearn/linear_model/tests/test_ridge.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 0998a16beabaa..78eb6c8d68fe0 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -333,7 +333,8 @@ def test_ridge_gcv_vs_ridge_loo_cv( random_state=0, shuffle=False, noise=noise, n_informative=5 ) y = y.reshape(y_shape) - X += 30 * np.random.RandomState(0).randn(X.shape[1]) + # X += 30 * np.random.RandomState(0).randn(X.shape[1]) + X += 30 alphas = [1e-3, .1, 1., 10., 1e3] loo_ridge = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, @@ -368,7 +369,8 @@ def test_ridge_gcv_sample_weights( n_samples=11, n_features=n_features, n_targets=n_targets, random_state=0, shuffle=False, noise=noise) y = y.reshape(y_shape) - x += 30 * rng.randn(x.shape[1]) + # x += 30 * rng.randn(x.shape[1]) + x += 30 sample_weight = 3 * rng.randn(len(x)) sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) indices = np.repeat(np.arange(x.shape[0]), sample_weight) @@ -380,7 +382,10 @@ def test_ridge_gcv_sample_weights( kfold = RidgeCV( alphas=alphas, cv=splits, scoring='neg_mean_squared_error', fit_intercept=fit_intercept) - with ignore_warnings(): + # ignore 
warning from GridSearchCV: DeprecationWarning: The default of the + # `iid` parameter will change from True to False in version 0.22 and will + # be removed in 0.24 + with ignore_warnings(category=DeprecationWarning): kfold.fit(X_tiled, y_tiled) ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept) From 9a05f60ef5e51c1bd79c57f1f0d51914bf1c8652 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 19:02:07 +0200 Subject: [PATCH 079/103] constant offset in test and x -> X --- sklearn/linear_model/tests/test_ridge.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 78eb6c8d68fe0..00b671c52fe64 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -333,7 +333,6 @@ def test_ridge_gcv_vs_ridge_loo_cv( random_state=0, shuffle=False, noise=noise, n_informative=5 ) y = y.reshape(y_shape) - # X += 30 * np.random.RandomState(0).randn(X.shape[1]) X += 30 alphas = [1e-3, .1, 1., 10., 1e3] @@ -365,19 +364,18 @@ def test_ridge_gcv_sample_weights( alphas = [1e-3, .1, 1., 10., 1e3] rng = np.random.RandomState(0) n_targets = y_shape[-1] if len(y_shape) == 2 else 1 - x, y = datasets.make_regression( + X, y = datasets.make_regression( n_samples=11, n_features=n_features, n_targets=n_targets, random_state=0, shuffle=False, noise=noise) y = y.reshape(y_shape) - # x += 30 * rng.randn(x.shape[1]) - x += 30 - sample_weight = 3 * rng.randn(len(x)) + X += 30 + sample_weight = 3 * rng.randn(len(X)) sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) - indices = np.repeat(np.arange(x.shape[0]), sample_weight) + indices = np.repeat(np.arange(X.shape[0]), sample_weight) sample_weight = sample_weight.astype(float) - X_tiled, y_tiled = x[indices], y[indices] + X_tiled, y_tiled = X[indices], y[indices] - cv = GroupKFold(n_splits=x.shape[0]) + cv = GroupKFold(n_splits=X.shape[0]) splits = cv.split(X_tiled, y_tiled, groups=indices) kfold = RidgeCV( alphas=alphas, cv=splits, scoring='neg_mean_squared_error', @@ -394,14 +392,14 @@ def test_ridge_gcv_sample_weights( kfold_errors = (y_tiled - predictions)**2 kfold_errors = [ np.sum(kfold_errors[indices == i], axis=0) for - i in np.arange(x.shape[0])] + i in np.arange(X.shape[0])] kfold_errors = np.asarray(kfold_errors) - x_gcv = X_constructor(x) + X_gcv = X_constructor(X) gcv_ridge = RidgeCV( alphas=alphas, store_cv_values=True, gcv_mode=gcv_mode, fit_intercept=fit_intercept) - gcv_ridge.fit(x_gcv, y, sample_weight=sample_weight) + gcv_ridge.fit(X_gcv, y, sample_weight=sample_weight) if len(y_shape) == 2: gcv_errors = gcv_ridge.cv_values_[:, :, alphas.index(kfold.alpha_)] else: From 9e678248c3c6c6d8a971eeacede63289c0ea8bb7 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 19:25:44 +0200 Subject: [PATCH 080/103] increase rtol to 1e-3 --- sklearn/linear_model/tests/test_ridge.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 00b671c52fe64..90c166df39a1e 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -348,8 +348,8 @@ def test_ridge_gcv_vs_ridge_loo_cv( gcv_ridge.fit(X_gcv, y) assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_) - assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-4) - assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-4) + 
assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3) @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) From 70a813566fee676f58fa779f37386371b45541e6 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 19:27:47 +0200 Subject: [PATCH 081/103] add constant offset to y --- sklearn/linear_model/tests/test_ridge.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 90c166df39a1e..cd45810b567c9 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -330,7 +330,8 @@ def test_ridge_gcv_vs_ridge_loo_cv( n_targets = y_shape[-1] if len(y_shape) == 2 else 1 X, y = make_regression( n_samples=n_samples, n_features=n_features, n_targets=n_targets, - random_state=0, shuffle=False, noise=noise, n_informative=5 + random_state=0, shuffle=False, noise=noise, n_informative=5, + bias=13. ) y = y.reshape(y_shape) X += 30 @@ -366,7 +367,7 @@ def test_ridge_gcv_sample_weights( n_targets = y_shape[-1] if len(y_shape) == 2 else 1 X, y = datasets.make_regression( n_samples=11, n_features=n_features, n_targets=n_targets, - random_state=0, shuffle=False, noise=noise) + random_state=0, shuffle=False, noise=noise, bias=13.) y = y.reshape(y_shape) X += 30 sample_weight = 3 * rng.randn(len(X)) From 161ca89f3b77f04be1a21eea5d4c82909cb1b3b0 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 19:31:02 +0200 Subject: [PATCH 082/103] more doc on gcv --- doc/modules/linear_model.rst | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/doc/modules/linear_model.rst b/doc/modules/linear_model.rst index a370791d248e2..f2bbf1374809a 100644 --- a/doc/modules/linear_model.rst +++ b/doc/modules/linear_model.rst @@ -141,12 +141,22 @@ as GridSearchCV except that it defaults to Generalized Cross-Validation (GCV), an efficient form of leave-one-out cross-validation:: >>> from sklearn import linear_model - >>> reg = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0], cv=3) + >>> reg = linear_model.RidgeCV(alphas=[1.0, 10.0], store_cv_values=True) >>> reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +SKIP - RidgeCV(alphas=[0.1, 1.0, 10.0], cv=3, fit_intercept=True, scoring=None, - normalize=False) - >>> reg.alpha_ # doctest: +SKIP - 0.1 + RidgeCV(alphas=array([ 1., 10.]), cv=None, fit_intercept=True, gcv_mode=None, + normalize=False, scoring=None, store_cv_values=True) + >>> reg.alpha_ # doctest: +SKIP + 1.0 + >>> reg.cv_values_ + array([[0.105625 , 0.25917355], + [0.0225 , 0.12570248], + [0.9025 , 0.9025 ]]) + +In some cases, for example for very large datasets, this is not the most +efficient way to set the hyperparameter, and k-fold cross-validation can be +faster. Specifying the value of the `cv` attribute will trigger the use of +cross-validation with `GridSearchCV`, for example `cv=10` for 10-fold +cross-validation. .. 
topic:: References From 36d248d1961d20ee47217a895c8ee357e73b6d25 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Thu, 2 May 2019 20:00:50 +0200 Subject: [PATCH 083/103] doctest skip --- doc/modules/linear_model.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/modules/linear_model.rst b/doc/modules/linear_model.rst index f2bbf1374809a..0224765bf958c 100644 --- a/doc/modules/linear_model.rst +++ b/doc/modules/linear_model.rst @@ -147,7 +147,7 @@ as GridSearchCV except that it defaults to Generalized Cross-Validation normalize=False, scoring=None, store_cv_values=True) >>> reg.alpha_ # doctest: +SKIP 1.0 - >>> reg.cv_values_ + >>> reg.cv_values_ # doctest: +SKIP array([[0.105625 , 0.25917355], [0.0225 , 0.12570248], [0.9025 , 0.9025 ]]) From c439ccbf2b176ae71df42d9acb97ee3a9a989bf4 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 08:07:44 +0200 Subject: [PATCH 084/103] perf: use LinearOperator transpose rather than _rmatvec --- sklearn/linear_model/ridge.py | 94 +++++++++++++++++------- sklearn/linear_model/tests/test_ridge.py | 26 ++++++- 2 files changed, 89 insertions(+), 31 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 4c0bbd714e068..468836b46a511 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -954,6 +954,70 @@ def _find_smallest_angle(query, vectors): return index +class _X_operator(sparse.linalg.LinearOperator): + """Behaves as centered and scaled X with an added intercept column. + + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]) + """ + + def __init__(self, X, X_mean, sqrt_sw): + self.n_samples, self.n_features = X.shape + super().__init__(X.dtype, (self.n_samples, self.n_features + 1)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + return safe_sparse_dot( + self.X, v[:-1], dense_output=True + ) - self.sqrt_sw * self.X_mean.dot(v[:-1]) + v[-1] * self.sqrt_sw + + def _matmat(self, v): + return ( + safe_sparse_dot(self.X, v[:-1], dense_output=True) - + self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + v[-1] * + self.sqrt_sw[:, None]) + + def _transpose(self): + return _Xt_operator(self.X, self.X_mean, self.sqrt_sw) + + +class _Xt_operator(sparse.linalg.LinearOperator): + """Behaves as transposed centered and scaled X with an intercept column. 
+ + This operator behaves as + np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T + """ + + def __init__(self, X, X_mean, sqrt_sw): + self.n_samples, self.n_features = X.shape + super().__init__(X.dtype, (self.n_features + 1, self.n_samples)) + self.X = X + self.X_mean = X_mean + self.sqrt_sw = sqrt_sw + + def _matvec(self, v): + v = v.ravel() + res = np.empty(self.n_features + 1) + res[:-1] = ( + safe_sparse_dot(self.X.T, v, dense_output=True) - + (self.X_mean * self.sqrt_sw.dot(v)) + ) + res[-1] = np.dot(v, self.sqrt_sw) + return res + + def _matmat(self, v): + res = np.empty((self.n_features + 1, v.shape[1])) + res[:-1] = ( + safe_sparse_dot(self.X.T, v, dense_output=True) - + self.X_mean[:, None] * self.sqrt_sw.dot(v) + ) + res[-1] = np.dot(self.sqrt_sw, v) + return res + + class _RidgeGCV(LinearModel): """Ridge regression with built-in Generalized Cross-Validation @@ -1253,33 +1317,9 @@ def _errors_and_values_covariance_sparse_intercept( w[intercept_dim] = 1 / s[intercept_dim] A = (V * w).dot(V.T) # add a column to X containing the square roots of sample weights - - def matvec(v): - v = v.ravel() - return safe_sparse_dot( - X, v[:-1], dense_output=True - ) - sqrt_sw * X_mean.dot(v[:-1]) + v[-1] * sqrt_sw - - def matmat(v): - return ( - safe_sparse_dot(X, v[:-1], dense_output=True) - - sqrt_sw[:, None] * X_mean.dot(v[:-1]) + v[-1] * - sqrt_sw[:, None]) - - def rmatvec(v): - v = v.ravel() - res = np.empty(n_features + 1) - res[:-1] = safe_sparse_dot( - X.T, v, dense_output=True) - X_mean * sqrt_sw.dot(v) - res[-1] = np.dot(v, sqrt_sw) - return res - - Xop = sparse.linalg.LinearOperator( - matvec=matvec, matmat=matmat, rmatvec=rmatvec, - shape=(n_samples, n_features + 1), dtype=X.dtype - ) - AXy = A.dot(Xop.adjoint().dot(y)) - y_hat = Xop.dot(AXy) + X_op = _X_operator(X, X_mean, sqrt_sw) + AXy = A.dot(X_op.T.dot(y)) + y_hat = X_op.dot(AXy) hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw) # return (1 - hat_diag), (y - y_hat) if len(y.shape) != 1: diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index cd45810b567c9..4ccbe0ce1d79c 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -34,6 +34,7 @@ from sklearn.linear_model.ridge import _solve_cholesky from sklearn.linear_model.ridge import _solve_cholesky_kernel from sklearn.linear_model.ridge import _check_gcv_mode +from sklearn.linear_model.ridge import _X_operator from sklearn.datasets import make_regression from sklearn.model_selection import GridSearchCV @@ -312,15 +313,31 @@ def test_ridge_individual_penalties(): assert_raises(ValueError, ridge.fit, X, y) +@pytest.mark.parametrize('n_col', [(), (1,), (3,)]) +def test_x_operator(n_col): + rng = np.random.RandomState(0) + X = rng.randn(11, 8) + X_m = rng.randn(8) + sqrt_sw = rng.randn(len(X)) + Y = rng.randn(11, *n_col) + A = rng.randn(9, *n_col) + operator = _X_operator(sp.csr_matrix(X), X_m, sqrt_sw) + reference_operator = np.hstack( + [X - sqrt_sw[:, None] * X_m, sqrt_sw[:, None]]) + assert_allclose(reference_operator.dot(A), operator.dot(A)) + assert_allclose(reference_operator.T.dot(Y), operator.T.dot(Y)) + + @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)]) +@pytest.mark.parametrize('fit_intercept', [True, False]) @pytest.mark.parametrize( - 'y_shape, fit_intercept, normalize, noise', + 'y_shape, normalize, noise', [ - ((11,), 
True, True, 1.), - ((11, 1), True, False, 20.), - ((11, 3), False, False, 30.), + ((11,), True, 1.), + ((11, 1), False, 20.), + ((11, 3), False, 30.), ] ) def test_ridge_gcv_vs_ridge_loo_cv( @@ -359,6 +376,7 @@ def test_ridge_gcv_vs_ridge_loo_cv( @pytest.mark.parametrize('y_shape, fit_intercept, noise', [((11,), True, 1.), ((11, 1), True, 20.), + ((11, 3), True, 30.), ((11, 3), False, 30.)]) def test_ridge_gcv_sample_weights( gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise): From bdea42268ad49a77329ae615cdb7df0e80e3a09e Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 11:06:44 +0200 Subject: [PATCH 085/103] don't skip ridge doctest in linear_model.rst --- doc/modules/linear_model.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/modules/linear_model.rst b/doc/modules/linear_model.rst index 0224765bf958c..a499d69b83da0 100644 --- a/doc/modules/linear_model.rst +++ b/doc/modules/linear_model.rst @@ -142,15 +142,15 @@ as GridSearchCV except that it defaults to Generalized Cross-Validation >>> from sklearn import linear_model >>> reg = linear_model.RidgeCV(alphas=[1.0, 10.0], store_cv_values=True) - >>> reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +SKIP + >>> reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +NORMALIZE_WHITESPACE RidgeCV(alphas=array([ 1., 10.]), cv=None, fit_intercept=True, gcv_mode=None, normalize=False, scoring=None, store_cv_values=True) - >>> reg.alpha_ # doctest: +SKIP + >>> reg.alpha_ 1.0 - >>> reg.cv_values_ # doctest: +SKIP - array([[0.105625 , 0.25917355], - [0.0225 , 0.12570248], - [0.9025 , 0.9025 ]]) + >>> reg.cv_values_ # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS + array([[0.105... , 0.259...], + [0.022... , 0.125...], + [0.902... , 0.902...]]) In some cases, for example for very large datasets, this is not the most efficient way to set the hyperparameter, and k-fold cross-validation can be From 0e29d14bda5ec8098c373c29adc5d58cc99f561b Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 12:18:34 +0200 Subject: [PATCH 086/103] docstring --- doc/modules/linear_model.rst | 2 +- sklearn/linear_model/ridge.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/modules/linear_model.rst b/doc/modules/linear_model.rst index a499d69b83da0..7a54e79402b8e 100644 --- a/doc/modules/linear_model.rst +++ b/doc/modules/linear_model.rst @@ -136,7 +136,7 @@ Setting the regularization parameter: generalized Cross-Validation ------------------------------------------------------------------ :class:`RidgeCV` implements ridge regression with built-in -cross-validation of the alpha parameter. The object works in the same way +cross-validation of the alpha parameter. The object works in the same way as GridSearchCV except that it defaults to Generalized Cross-Validation (GCV), an efficient form of leave-one-out cross-validation:: diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 468836b46a511..6586503503d31 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1589,6 +1589,7 @@ class RidgeCV(_BaseRidgeCV, RegressorMixin): the estimates. Larger values specify stronger regularization. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. + If using generalized cross-validation, alphas must be positive. fit_intercept : boolean Whether to calculate the intercept for this model. 
If set From 3a77e050d28295c069dc1b1dca1a708843ebdea6 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 15:21:58 +0200 Subject: [PATCH 087/103] improve RidgeCV narrative doc --- doc/modules/linear_model.rst | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/doc/modules/linear_model.rst b/doc/modules/linear_model.rst index 7a54e79402b8e..c01b74775684f 100644 --- a/doc/modules/linear_model.rst +++ b/doc/modules/linear_model.rst @@ -140,23 +140,20 @@ cross-validation of the alpha parameter. The object works in the same way as GridSearchCV except that it defaults to Generalized Cross-Validation (GCV), an efficient form of leave-one-out cross-validation:: + >>> import numpy as np >>> from sklearn import linear_model - >>> reg = linear_model.RidgeCV(alphas=[1.0, 10.0], store_cv_values=True) + >>> reg = linear_model.RidgeCV(alphas=np.logspace(-6, 6, 13)) >>> reg.fit([[0, 0], [0, 0], [1, 1]], [0, .1, 1]) # doctest: +NORMALIZE_WHITESPACE - RidgeCV(alphas=array([ 1., 10.]), cv=None, fit_intercept=True, gcv_mode=None, - normalize=False, scoring=None, store_cv_values=True) + RidgeCV(alphas=array([1.e-06, 1.e-05, 1.e-04, 1.e-03, 1.e-02, 1.e-01, 1.e+00, 1.e+01, + 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06]), + cv=None, fit_intercept=True, gcv_mode=None, normalize=False, + scoring=None, store_cv_values=False) >>> reg.alpha_ - 1.0 - >>> reg.cv_values_ # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - array([[0.105... , 0.259...], - [0.022... , 0.125...], - [0.902... , 0.902...]]) - -In some cases, for example for very large datasets, this is not the most -efficient way to set the hyperparameter, and k-fold cross-validation can be -faster. Specifying the value of the `cv` attribute will trigger the use of + 0.01 + +Specifying the value of the `cv` attribute will trigger the use of cross-validation with `GridSearchCV`, for example `cv=10` for 10-fold -cross-validation. +cross-validation, rather than Generalized Cross-Validation. .. 
topic:: References From bc3ff7e2abdf84468d93011ef2debcd19ccf0502 Mon Sep 17 00:00:00 2001 From: Olivier Grisel Date: Fri, 3 May 2019 19:26:49 +0200 Subject: [PATCH 088/103] Update sklearn/linear_model/tests/test_ridge.py Co-Authored-By: jeromedockes --- sklearn/linear_model/tests/test_ridge.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 4ccbe0ce1d79c..ee076d15d9c0c 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -424,7 +424,7 @@ def test_ridge_gcv_sample_weights( else: gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)] - assert kfold.alpha_ == gcv_ridge.alpha_ + assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_) assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=5e-2) From 34d2c2b9283cdd942dc03d3f608bafcedec78965 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 19:28:33 +0200 Subject: [PATCH 089/103] reduce tolerance to 1e-3 in test_ridge_gcv_sample_weights --- sklearn/linear_model/tests/test_ridge.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index ee076d15d9c0c..dc5a7f813c538 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -425,10 +425,10 @@ def test_ridge_gcv_sample_weights( gcv_errors = gcv_ridge.cv_values_[:, alphas.index(kfold.alpha_)] assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_) - assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) - assert_allclose(gcv_errors, kfold_errors, rtol=5e-2) - assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=5e-2) - assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=5e-2) + assert_allclose(gcv_errors, kfold_errors, rtol=1e-3) + assert_allclose(gcv_errors, kfold_errors, rtol=1e-3) + assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-3) + assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-3) @pytest.mark.parametrize('mode', [True, 1, 5, 'bad', 'gcv', np.arange(3)]) From 21cf6bf8f8cc4a2863c3a6d33f6fb959c473b562 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 19:29:51 +0200 Subject: [PATCH 090/103] duplicated line --- sklearn/linear_model/tests/test_ridge.py | 1 - 1 file changed, 1 deletion(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index dc5a7f813c538..fec40bb3b3372 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -426,7 +426,6 @@ def test_ridge_gcv_sample_weights( assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_) assert_allclose(gcv_errors, kfold_errors, rtol=1e-3) - assert_allclose(gcv_errors, kfold_errors, rtol=1e-3) assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-3) assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-3) From 4d4f819a2bdb90b0ee6955444c0697f9b0fd28af Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 19:37:44 +0200 Subject: [PATCH 091/103] better explaination of column capturing intercept --- sklearn/linear_model/ridge.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 6586503503d31..c2042e59b3b21 100644 --- a/sklearn/linear_model/ridge.py +++ 
b/sklearn/linear_model/ridge.py @@ -1344,10 +1344,11 @@ def _decompose_covariance_dense(self, X, y, sqrt_sw): # X already centered X_mean = np.zeros(X.shape[1], dtype=X.dtype) if self.fit_intercept: - intercept = sqrt_sw[:, None] - X = np.hstack((X, intercept)) - # to emulate fit_intercept=True situation, add a column on ones - # Note that by centering, the other columns are orthogonal to that one + # to emulate fit_intercept=True situation, add a column + # containing the square roots of the sample weights + # by centering, the other columns are orthogonal to that one + intercept_column = sqrt_sw[:, None] + X = np.hstack((X, intercept_column)) U, s, _ = linalg.svd(X, full_matrices=0) v = s ** 2 UT_y = np.dot(U.T, y) From 1b8d288a22314d1816e4e0bb6c2fdc035a8bf572 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Fri, 3 May 2019 19:51:42 +0200 Subject: [PATCH 092/103] errors_and_values -> solve --- sklearn/linear_model/ridge.py | 26 ++++++++++++------------ sklearn/linear_model/tests/test_ridge.py | 8 ++++---- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index c2042e59b3b21..31e8df7c52834 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1223,7 +1223,7 @@ def _decompose_gram(self, X, y, sqrt_sw): QT_y = np.dot(Q.T, y) return X_mean, v, Q, QT_y - def _errors_and_values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): + def _solve_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X.X^T (n_features >= n_samples). @@ -1247,11 +1247,11 @@ def _errors_and_values_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): return G_diag, c def _errors_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): - G_diag, c = self._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) + G_diag, c = self._solve_gram(alpha, y, sqrt_sw, v, Q, QT_y) return (c / G_diag) ** 2, c def _values_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): - G_diag, c = self._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) + G_diag, c = self._solve_gram(alpha, y, sqrt_sw, v, Q, QT_y) return y - (c / G_diag), c def _decompose_covariance_sparse(self, X, y, sqrt_sw): @@ -1278,7 +1278,7 @@ def _decompose_covariance_sparse(self, X, y, sqrt_sw): V = V[:, nullspace_dim:] return X_mean, s, V, X - def _errors_and_values_covariance_sparse_no_intercept( + def _solve_covariance_sparse_no_intercept( self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1296,7 +1296,7 @@ def _errors_and_values_covariance_sparse_no_intercept( hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_covariance_sparse_intercept( + def _solve_covariance_sparse_intercept( self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1327,7 +1327,7 @@ def _errors_and_values_covariance_sparse_intercept( hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _errors_and_values_covariance_sparse( + def _solve_covariance_sparse( self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1335,9 +1335,9 @@ def _errors_and_values_covariance_sparse( (n_features < n_samples and X is sparse). 
""" if self.fit_intercept: - return self._errors_and_values_covariance_sparse_intercept( + return self._solve_covariance_sparse_intercept( alpha, y, sqrt_sw, X_mean, s, V, X) - return self._errors_and_values_covariance_sparse_no_intercept( + return self._solve_covariance_sparse_no_intercept( alpha, y, sqrt_sw, X_mean, s, V, X) def _decompose_covariance_dense(self, X, y, sqrt_sw): @@ -1354,7 +1354,7 @@ def _decompose_covariance_dense(self, X, y, sqrt_sw): UT_y = np.dot(U.T, y) return X_mean, v, U, UT_y - def _errors_and_values_covariance_dense( + def _solve_covariance_dense( self, alpha, y, sqrt_sw, v, U, UT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1376,22 +1376,22 @@ def _errors_and_values_covariance_dense( return G_diag, c def _errors_covariance_sparse(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._errors_and_values_covariance_sparse( + G_diag, c = self._solve_covariance_sparse( alpha, y, sqrt_sw, X_mean, v, U, UT_y) return (c / G_diag) ** 2, c def _values_covariance_sparse(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._errors_and_values_covariance_sparse( + G_diag, c = self._solve_covariance_sparse( alpha, y, sqrt_sw, X_mean, v, U, UT_y) return y - (c / G_diag), c def _errors_covariance_dense(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._errors_and_values_covariance_dense( + G_diag, c = self._solve_covariance_dense( alpha, y, sqrt_sw, v, U, UT_y) return (c / G_diag) ** 2, c def _values_covariance_dense(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._errors_and_values_covariance_dense( + G_diag, c = self._solve_covariance_dense( alpha, y, sqrt_sw, v, U, UT_y) return y - (c / G_diag), c diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index fec40bb3b3372..021c066285501 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -1049,7 +1049,7 @@ def test_ridge_regression_check_arguments_validity(return_intercept, assert_allclose(out, true_coefs, rtol=0, atol=atol) -def test_errors_and_values_gram(): +def test_solve_gram(): ridgecv = _RidgeGCV() ridgecv._with_sw = False rng = check_random_state(42) @@ -1061,7 +1061,7 @@ def test_errors_and_values_gram(): v = rng.randn(n) Q = rng.randn(len(v), len(v)) QT_y = Q.T.dot(y) - G_diag, c = ridgecv._errors_and_values_gram(alpha, y, sqrt_sw, v, Q, QT_y) + G_diag, c = ridgecv._solve_gram(alpha, y, sqrt_sw, v, Q, QT_y) # test that helper function behaves as expected out, c_ = ridgecv._errors_gram(alpha, y, sqrt_sw, X_m, v, Q, QT_y) @@ -1073,7 +1073,7 @@ def test_errors_and_values_gram(): np.testing.assert_array_equal(c_, c) -def test_errors_and_values_covariance(): +def test_solve_covariance(): ridgecv = _RidgeGCV() rng = check_random_state(42) alpha = 1. 
@@ -1084,7 +1084,7 @@ def test_errors_and_values_covariance(): U = rng.randn(n, p) UT_y = U.T.dot(y) X_m = np.zeros(p) - G_diag, c = ridgecv._errors_and_values_covariance_dense( + G_diag, c = ridgecv._solve_covariance_dense( alpha, y, sqrt_sw, v, U, UT_y) # test that helper function behaves as expected From 80244ba820a1e8a0579a9761f2887f3736176b67 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sat, 4 May 2019 10:25:01 +0200 Subject: [PATCH 093/103] make test data design matrix actually sparse in ridgegcv tests --- sklearn/linear_model/tests/test_ridge.py | 36 +++++++++++++++++++----- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 021c066285501..2a17a77dae26e 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -328,6 +328,30 @@ def test_x_operator(n_col): assert_allclose(reference_operator.T.dot(Y), operator.T.dot(Y)) +def _make_sparse_offset_regression( + n_samples=100, n_features=100, proportion_nonzero=.5, + n_informative=10, n_targets=1, bias=13., X_offset=30., + noise=30., shuffle=True, coef=False, random_state=None): + X, y, c = make_regression( + n_samples=n_samples, n_features=n_features, n_informative=n_informative, + n_targets=n_targets, bias=bias, noise=noise, shuffle=shuffle, + coef=True, random_state=random_state) + if n_features == 1: + c = np.asarray([c]) + X += X_offset + mask = np.random.RandomState(random_state).binomial( + 1, proportion_nonzero, X.shape) > 0 + removed_X = X.copy() + X[~mask] = 0. + removed_X[mask] = 0. + y -= removed_X.dot(c) + if n_features == 1: + c = c[0] + if coef: + return X, y, c + return X, y + + @pytest.mark.parametrize('gcv_mode', ['svd', 'eigen']) @pytest.mark.parametrize('X_constructor', [np.asarray, sp.csr_matrix]) @pytest.mark.parametrize('X_shape', [(11, 8), (11, 20)]) @@ -345,13 +369,11 @@ def test_ridge_gcv_vs_ridge_loo_cv( fit_intercept, normalize, noise): n_samples, n_features = X_shape n_targets = y_shape[-1] if len(y_shape) == 2 else 1 - X, y = make_regression( + X, y = _make_sparse_offset_regression( n_samples=n_samples, n_features=n_features, n_targets=n_targets, - random_state=0, shuffle=False, noise=noise, n_informative=5, - bias=13. + random_state=0, shuffle=False, noise=noise, n_informative=5 ) y = y.reshape(y_shape) - X += 30 alphas = [1e-3, .1, 1., 10., 1e3] loo_ridge = RidgeCV(cv=n_samples, fit_intercept=fit_intercept, @@ -383,11 +405,11 @@ def test_ridge_gcv_sample_weights( alphas = [1e-3, .1, 1., 10., 1e3] rng = np.random.RandomState(0) n_targets = y_shape[-1] if len(y_shape) == 2 else 1 - X, y = datasets.make_regression( + X, y = _make_sparse_offset_regression( n_samples=11, n_features=n_features, n_targets=n_targets, - random_state=0, shuffle=False, noise=noise, bias=13.) 
+ random_state=0, shuffle=False, noise=noise) y = y.reshape(y_shape) - X += 30 + sample_weight = 3 * rng.randn(len(X)) sample_weight = (sample_weight - sample_weight.min() + 1).astype(int) indices = np.repeat(np.arange(X.shape[0]), sample_weight) From 32b6a630ba964626cb0f53fbad6485ee461c6d46 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sat, 4 May 2019 10:45:54 +0200 Subject: [PATCH 094/103] remove _errors_and_values --- sklearn/linear_model/ridge.py | 57 +++--------- sklearn/linear_model/tests/test_ridge.py | 106 +---------------------- 2 files changed, 15 insertions(+), 148 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 31e8df7c52834..13aa9195c422c 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1223,7 +1223,7 @@ def _decompose_gram(self, X, y, sqrt_sw): QT_y = np.dot(Q.T, y) return X_mean, v, Q, QT_y - def _solve_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): + def _solve_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X.X^T (n_features >= n_samples). @@ -1246,14 +1246,6 @@ def _solve_gram(self, alpha, y, sqrt_sw, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): - G_diag, c = self._solve_gram(alpha, y, sqrt_sw, v, Q, QT_y) - return (c / G_diag) ** 2, c - - def _values_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): - G_diag, c = self._solve_gram(alpha, y, sqrt_sw, v, Q, QT_y) - return y - (c / G_diag), c - def _decompose_covariance_sparse(self, X, y, sqrt_sw): """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape @@ -1355,7 +1347,7 @@ def _decompose_covariance_dense(self, X, y, sqrt_sw): return X_mean, v, U, UT_y def _solve_covariance_dense( - self, alpha, y, sqrt_sw, v, U, UT_y): + self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have an SVD decomposition of X @@ -1375,26 +1367,6 @@ def _solve_covariance_dense( G_diag = G_diag[:, np.newaxis] return G_diag, c - def _errors_covariance_sparse(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._solve_covariance_sparse( - alpha, y, sqrt_sw, X_mean, v, U, UT_y) - return (c / G_diag) ** 2, c - - def _values_covariance_sparse(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._solve_covariance_sparse( - alpha, y, sqrt_sw, X_mean, v, U, UT_y) - return y - (c / G_diag), c - - def _errors_covariance_dense(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._solve_covariance_dense( - alpha, y, sqrt_sw, v, U, UT_y) - return (c / G_diag) ** 2, c - - def _values_covariance_dense(self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): - G_diag, c = self._solve_covariance_dense( - alpha, y, sqrt_sw, v, U, UT_y) - return y - (c / G_diag), c - def fit(self, X, y, sample_weight=None): """Fit Ridge regression model @@ -1431,19 +1403,16 @@ def fit(self, X, y, sample_weight=None): gcv_mode = _check_gcv_mode(X, self.gcv_mode) if gcv_mode == 'eigen': - _decompose = self._decompose_gram - _errors = self._errors_gram - _values = self._values_gram + decompose = self._decompose_gram + solve = self._solve_gram elif gcv_mode == 'svd': # assert n_samples >= n_features if sparse.issparse(X): - _decompose = self._decompose_covariance_sparse - _errors = self._errors_covariance_sparse - _values = self._values_covariance_sparse + decompose = 
self._decompose_covariance_sparse + solve = self._solve_covariance_sparse else: - _decompose = self._decompose_covariance_dense - _errors = self._errors_covariance_dense - _values = self._values_covariance_dense + decompose = self._decompose_covariance_dense + solve = self._solve_covariance_dense if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight) @@ -1457,14 +1426,14 @@ def fit(self, X, y, sample_weight=None): n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] - X_mean, *decomposition = _decompose(X, y, sqrt_sw) + X_mean, *decomposition = decompose(X, y, sqrt_sw) for i, alpha in enumerate(self.alphas): + G_diag, c = solve( + float(alpha), y, sqrt_sw, X_mean, *decomposition) if error: - out, c = _errors( - float(alpha), y, sqrt_sw, X_mean, *decomposition) + out = (c / G_diag) ** 2 else: - out, c = _values( - float(alpha), y, sqrt_sw, X_mean, *decomposition) + out = y - (c / G_diag) cv_values[:, i] = out.ravel() C.append(c) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 2a17a77dae26e..be41460b5723a 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -491,61 +491,9 @@ def _test_ridge_loo(filter_): else: X_diabetes_ = X_diabetes ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) - sqrt_sw = np.ones(X_diabetes_.shape[0]) - ridge = Ridge(alpha=1.0, fit_intercept=fit_intercept) - - # because fit_intercept is applied - - # generalized cross-validation (efficient leave-one-out) - decomp = ridge_gcv._decompose_gram(X_diabetes_, y_diabetes, sqrt_sw) - errors, c = ridge_gcv._errors_gram(1.0, y_diabetes, sqrt_sw, *decomp) - values, c = ridge_gcv._values_gram(1.0, y_diabetes, sqrt_sw, *decomp) - - # brute-force leave-one-out: remove one example at a time - errors2 = [] - values2 = [] - for i in range(n_samples): - sel = np.arange(n_samples) != i - X_new = X_diabetes_[sel] - y_new = y_diabetes[sel] - ridge.fit(X_new, y_new) - value = ridge.predict([X_diabetes_[i]])[0] - error = (y_diabetes[i] - value) ** 2 - errors2.append(error) - values2.append(value) - - # check that efficient and brute-force LOO give same results - assert errors == pytest.approx(errors2) - assert values == pytest.approx(values2) - - # generalized cross-validation (efficient leave-one-out, - # SVD variation) - decomp = ridge_gcv._decompose_covariance_dense( - X_diabetes_, y_diabetes, sqrt_sw) - errors3, c = ridge_gcv._errors_covariance_dense( - ridge.alpha, y_diabetes, sqrt_sw, *decomp) - values3, c = ridge_gcv._values_covariance_dense( - ridge.alpha, y_diabetes, sqrt_sw, *decomp) - - # check that efficient and SVD efficient LOO give same results - assert errors == pytest.approx(errors3) - assert values == pytest.approx(values3) - - # generalized cross-validation (efficient leave-one-out, - # SVD variation) - decomp = ridge_gcv._decompose_covariance_sparse( - sp.csr_matrix(X_diabetes_), y_diabetes, sqrt_sw) - errors4, c = ridge_gcv._errors_covariance_sparse( - ridge.alpha, y_diabetes, sqrt_sw, *decomp) - values4, c = ridge_gcv._values_covariance_sparse( - ridge.alpha, y_diabetes, sqrt_sw, *decomp) - - # check that efficient and SVD efficient LOO give same results - assert errors == pytest.approx(errors4) - assert values == pytest.approx(values4) # check best alpha - ridge_gcv.fit(filter_(X_diabetes), y_diabetes, sqrt_sw) + ridge_gcv.fit(filter_(X_diabetes), y_diabetes) alpha_ = ridge_gcv.alpha_ ret.append(alpha_) @@ -553,7 +501,7 @@ def 
_test_ridge_loo(filter_): f = ignore_warnings scoring = make_scorer(mean_squared_error, greater_is_better=False) ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring) - f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes, sqrt_sw) + f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes) assert ridge_gcv2.alpha_ == pytest.approx(alpha_) # check that we get same best alpha with custom score_func @@ -1071,56 +1019,6 @@ def test_ridge_regression_check_arguments_validity(return_intercept, assert_allclose(out, true_coefs, rtol=0, atol=atol) -def test_solve_gram(): - ridgecv = _RidgeGCV() - ridgecv._with_sw = False - rng = check_random_state(42) - alpha = 1. - n = 5 - sqrt_sw = np.ones(n) - X_m = None - y = rng.randn(n) - v = rng.randn(n) - Q = rng.randn(len(v), len(v)) - QT_y = Q.T.dot(y) - G_diag, c = ridgecv._solve_gram(alpha, y, sqrt_sw, v, Q, QT_y) - - # test that helper function behaves as expected - out, c_ = ridgecv._errors_gram(alpha, y, sqrt_sw, X_m, v, Q, QT_y) - np.testing.assert_array_equal(out, (c / G_diag) ** 2) - np.testing.assert_array_equal(c, c) - - out, c_ = ridgecv._values_gram(alpha, y, sqrt_sw, X_m, v, Q, QT_y) - np.testing.assert_array_equal(out, y - (c / G_diag)) - np.testing.assert_array_equal(c_, c) - - -def test_solve_covariance(): - ridgecv = _RidgeGCV() - rng = check_random_state(42) - alpha = 1. - for n, p in zip((5, 10), (12, 6)): - sqrt_sw = np.ones(n) - y = rng.randn(n) - v = rng.randn(p) - U = rng.randn(n, p) - UT_y = U.T.dot(y) - X_m = np.zeros(p) - G_diag, c = ridgecv._solve_covariance_dense( - alpha, y, sqrt_sw, v, U, UT_y) - - # test that helper function behaves as expected - out, c_ = ridgecv._errors_covariance_dense( - alpha, y, sqrt_sw, X_m, v, U, UT_y) - np.testing.assert_array_equal(out, (c / G_diag) ** 2) - np.testing.assert_array_equal(c, c) - - out, c_ = ridgecv._values_covariance_dense( - alpha, y, sqrt_sw, X_m, v, U, UT_y) - np.testing.assert_array_equal(out, y - (c / G_diag)) - np.testing.assert_array_equal(c_, c) - - def test_ridge_classifier_no_support_multilabel(): X, y = make_multilabel_classification(n_samples=10, random_state=0) assert_raises(ValueError, RidgeClassifier().fit, X, y) From 32b5345223f2889c8fc76961f577c74af833aee8 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sat, 4 May 2019 10:48:03 +0200 Subject: [PATCH 095/103] pep8 --- sklearn/linear_model/tests/test_ridge.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index be41460b5723a..e62fce25c91f3 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -333,8 +333,9 @@ def _make_sparse_offset_regression( n_informative=10, n_targets=1, bias=13., X_offset=30., noise=30., shuffle=True, coef=False, random_state=None): X, y, c = make_regression( - n_samples=n_samples, n_features=n_features, n_informative=n_informative, - n_targets=n_targets, bias=bias, noise=noise, shuffle=shuffle, + n_samples=n_samples, n_features=n_features, + n_informative=n_informative, n_targets=n_targets, bias=bias, + noise=noise, shuffle=shuffle, coef=True, random_state=random_state) if n_features == 1: c = np.asarray([c]) @@ -486,10 +487,6 @@ def _test_ridge_loo(filter_): ret = [] fit_intercept = filter_ == DENSE_FILTER - if fit_intercept: - X_diabetes_ = X_diabetes - X_diabetes.mean(0) - else: - X_diabetes_ = X_diabetes ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept) # check best alpha From afe6ec80292138d437d8c3089010e71edc64dcc4 Mon Sep 17 
00:00:00 2001 From: Jerome Dockes Date: Sat, 4 May 2019 10:50:55 +0200 Subject: [PATCH 096/103] larger noise in tests --- sklearn/linear_model/tests/test_ridge.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index e62fce25c91f3..ecf5d28933a7c 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -361,8 +361,8 @@ def _make_sparse_offset_regression( 'y_shape, normalize, noise', [ ((11,), True, 1.), - ((11, 1), False, 20.), - ((11, 3), False, 30.), + ((11, 1), False, 30.), + ((11, 3), False, 150.), ] ) def test_ridge_gcv_vs_ridge_loo_cv( @@ -399,7 +399,7 @@ def test_ridge_gcv_vs_ridge_loo_cv( @pytest.mark.parametrize('y_shape, fit_intercept, noise', [((11,), True, 1.), ((11, 1), True, 20.), - ((11, 3), True, 30.), + ((11, 3), True, 150.), ((11, 3), False, 30.)]) def test_ridge_gcv_sample_weights( gcv_mode, X_constructor, fit_intercept, n_features, y_shape, noise): From 692e6f0c46496a8afc5136052c193704accbd8db Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Sat, 4 May 2019 10:58:12 +0200 Subject: [PATCH 097/103] rename 'out' either squared_errors or predictions --- sklearn/linear_model/ridge.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 13aa9195c422c..febddd127f132 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1431,10 +1431,11 @@ def fit(self, X, y, sample_weight=None): G_diag, c = solve( float(alpha), y, sqrt_sw, X_mean, *decomposition) if error: - out = (c / G_diag) ** 2 + squared_errors = (c / G_diag) ** 2 + cv_values[:, i] = squared_errors.ravel() else: - out = y - (c / G_diag) - cv_values[:, i] = out.ravel() + predictions = y - (c / G_diag) + cv_values[:, i] = predictions.ravel() C.append(c) if error: From 3e6e794983bd896fae9c5a67b4c06ceec59c1620 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 6 May 2019 10:49:05 +0200 Subject: [PATCH 098/103] address comments by @thomasjpfan --- sklearn/linear_model/ridge.py | 57 ++++++++++++------------ sklearn/linear_model/tests/test_ridge.py | 2 +- 2 files changed, 29 insertions(+), 30 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index 7bbd513c0b20e..bd125c5862bbe 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -930,13 +930,12 @@ def classes_(self): def _check_gcv_mode(X, gcv_mode): possible_gcv_modes = [None, 'auto', 'svd', 'eigen'] - if (gcv_mode is not None and not isinstance(gcv_mode, str)) or ( - gcv_mode not in possible_gcv_modes): + if gcv_mode not in possible_gcv_modes: raise ValueError( "Unknown value for 'gcv_mode'. 
" "Got {} instead of one of {}" .format( gcv_mode, possible_gcv_modes)) - if gcv_mode not in [None, 'auto']: + if gcv_mode in ['eigen', 'svd']: return gcv_mode # if X has more rows than columns, use decomposition of X^T.X, # otherwise X.X^T @@ -971,8 +970,8 @@ class _X_operator(sparse.linalg.LinearOperator): """ def __init__(self, X, X_mean, sqrt_sw): - self.n_samples, self.n_features = X.shape - super().__init__(X.dtype, (self.n_samples, self.n_features + 1)) + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_samples, n_features + 1)) self.X = X self.X_mean = X_mean self.sqrt_sw = sqrt_sw @@ -1001,15 +1000,16 @@ class _Xt_operator(sparse.linalg.LinearOperator): """ def __init__(self, X, X_mean, sqrt_sw): - self.n_samples, self.n_features = X.shape - super().__init__(X.dtype, (self.n_features + 1, self.n_samples)) + n_samples, n_features = X.shape + super().__init__(X.dtype, (n_features + 1, n_samples)) self.X = X self.X_mean = X_mean self.sqrt_sw = sqrt_sw def _matvec(self, v): v = v.ravel() - res = np.empty(self.n_features + 1) + n_features = self.shape[0] + res = np.empty(n_features) res[:-1] = ( safe_sparse_dot(self.X.T, v, dense_output=True) - (self.X_mean * self.sqrt_sw.dot(v)) @@ -1018,7 +1018,8 @@ def _matvec(self, v): return res def _matmat(self, v): - res = np.empty((self.n_features + 1, v.shape[1])) + n_features = self.shape[0] + res = np.empty((n_features, v.shape[1])) res[:-1] = ( safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[:, None] * self.sqrt_sw.dot(v) @@ -1089,7 +1090,7 @@ def _diag_dot(self, D, B): D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)] return D * B - def _compute_gram(self, X, sqrt_sw, center=True): + def _compute_gram(self, X, sqrt_sw): """Computes the Gram matrix with possible centering. If ``center`` is ``True``, compute @@ -1098,7 +1099,7 @@ def _compute_gram(self, X, sqrt_sw, center=True): Parameters ---------- - X : ndarray, shape (n_samples, n_features) + X : {array-like, sparse matrix}, shape (n_samples, n_features) The input uncentered data. sqrt_sw : ndarray, shape (n_samples,) @@ -1114,6 +1115,7 @@ def _compute_gram(self, X, sqrt_sw, center=True): X_mean : ndarray, shape (n_feature,) The mean of ``X`` for each feature. """ + center = self.fit_intercept and sparse.issparse(X) if not center: # in this case centering has been done in preprocessing # or we are not fitting an intercept. @@ -1128,14 +1130,15 @@ def _compute_gram(self, X, sqrt_sw, center=True): X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) X_mX = sqrt_sw[:, None] * safe_sparse_dot( X_mean, X.T, dense_output=True) - X_mX_m = np.empty((n_samples, n_samples), dtype=X.dtype) - X_mX_m[:, :] = np.dot(X_mean, X_mean) + X_mX_m = np.full((n_samples, n_samples), + fill_value=np.dot(X_mean, X_mean), + dtype=X.dtype) X_mX_m *= sqrt_sw X_mX_m *= sqrt_sw[:, None] return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, X_mean) - def _compute_covariance(self, X, sqrt_sw, center=True): + def _compute_covariance(self, X, sqrt_sw): """Computes centered covariance matrix. If ``center`` is ``True``, compute @@ -1145,7 +1148,7 @@ def _compute_covariance(self, X, sqrt_sw, center=True): Parameters ---------- - X : ndarray, shape (n_samples, n_features) + X : sparse matrix, shape (n_samples, n_features) The input uncentered data. sqrt_sw : ndarray, shape (n_samples,) @@ -1161,12 +1164,12 @@ def _compute_covariance(self, X, sqrt_sw, center=True): X_mean : ndarray, shape (n_feature,) The mean of ``X`` for each feature. 
""" - if not center: + if not self.fit_intercept: # in this case centering has been done in preprocessing # or we are not fitting an intercept. X_mean = np.zeros(X.shape[1], dtype=X.dtype) return safe_sparse_dot(X.T, X, dense_output=True), X_mean - # otherwise X is always sparse + # this function only gets called for sparse X n_samples = X.shape[0] sample_weight_matrix = sparse.dia_matrix( (sqrt_sw, 0), shape=(n_samples, n_samples)) @@ -1217,11 +1220,10 @@ def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1) return diag - def _decompose_gram(self, X, y, sqrt_sw): + def _eigen_decompose_gram(self, X, y, sqrt_sw): """Eigendecomposition of X.X^T, used when n_samples <= n_features""" # if X is dense it has already been centered in preprocessing - center = self.fit_intercept and sparse.issparse(X) - K, X_mean = self._compute_gram(X, sqrt_sw, center=center) + K, X_mean = self._compute_gram(X, sqrt_sw) if self.fit_intercept: # to emulate centering X with sample weights, # ie removing the weighted average, we add a column @@ -1255,12 +1257,11 @@ def _solve_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _decompose_covariance_sparse(self, X, y, sqrt_sw): + def _svd_decompose_covariance(self, X, y, sqrt_sw): """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape cov = np.empty((n_features + 1, n_features + 1)) - cov[:-1, :-1], X_mean = self._compute_covariance( - X, sqrt_sw, center=self.fit_intercept) + cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw) if not self.fit_intercept: cov = cov[:-1, :-1] # to emulate centering X with sample weights, @@ -1286,7 +1287,6 @@ def _solve_covariance_sparse_no_intercept( Used when we have a decomposition of X^T.X (n_features < n_samples and X is sparse), and not fitting an intercept. """ - n_samples, n_features = X.shape w = 1 / (s + alpha) A = (V * w).dot(V.T) AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True)) @@ -1305,7 +1305,6 @@ def _solve_covariance_sparse_intercept( (n_features < n_samples and X is sparse), and we are fitting an intercept. 
""" - n_samples, n_features = X.shape # the vector [0, 0, ..., 0, 1] # is the eigenvector of X^TX which # corresponds to the intercept; we cancel the regularization on @@ -1341,7 +1340,7 @@ def _solve_covariance_sparse( return self._solve_covariance_sparse_no_intercept( alpha, y, sqrt_sw, X_mean, s, V, X) - def _decompose_covariance_dense(self, X, y, sqrt_sw): + def _svd_decompose_design_matrix(self, X, y, sqrt_sw): # X already centered X_mean = np.zeros(X.shape[1], dtype=X.dtype) if self.fit_intercept: @@ -1414,15 +1413,15 @@ def fit(self, X, y, sample_weight=None): gcv_mode = _check_gcv_mode(X, self.gcv_mode) if gcv_mode == 'eigen': - decompose = self._decompose_gram + decompose = self._eigen_decompose_gram solve = self._solve_gram elif gcv_mode == 'svd': # assert n_samples >= n_features if sparse.issparse(X): - decompose = self._decompose_covariance_sparse + decompose = self._svd_decompose_covariance solve = self._solve_covariance_sparse else: - decompose = self._decompose_covariance_dense + decompose = self._svd_decompose_design_matrix solve = self._solve_covariance_dense if sample_weight is not None: diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index 41c3780d70fc6..d1805de9a58f1 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -455,7 +455,7 @@ def test_ridge_gcv_sample_weights( assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-3) -@pytest.mark.parametrize('mode', [True, 1, 5, 'bad', 'gcv', np.arange(3)]) +@pytest.mark.parametrize('mode', [True, 1, 5, 'bad', 'gcv']) def test_check_gcv_mode_error(mode): X, y = make_regression(n_samples=5, n_features=2) gcv = RidgeCV(gcv_mode=mode) From 52ef1e94cad3c2fc3fecab2c39cc29af3b82079b Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 6 May 2019 12:09:11 +0200 Subject: [PATCH 099/103] simplify _compute_gram --- sklearn/linear_model/ridge.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index bd125c5862bbe..c367e64713907 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1130,11 +1130,7 @@ def _compute_gram(self, X, sqrt_sw): X_mean *= n_samples / sqrt_sw.dot(sqrt_sw) X_mX = sqrt_sw[:, None] * safe_sparse_dot( X_mean, X.T, dense_output=True) - X_mX_m = np.full((n_samples, n_samples), - fill_value=np.dot(X_mean, X_mean), - dtype=X.dtype) - X_mX_m *= sqrt_sw - X_mX_m *= sqrt_sw[:, None] + X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean) return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m - X_mX - X_mX.T, X_mean) From e38aa8dcff075c28492aeb5f2dd9d881df09e6a1 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 6 May 2019 12:48:24 +0200 Subject: [PATCH 100/103] add tests for _compute_gram and _compute_covariance --- sklearn/linear_model/tests/test_ridge.py | 40 ++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index d1805de9a58f1..fc5fa1357b3a1 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -330,6 +330,46 @@ def test_x_operator(n_col): assert_allclose(reference_operator.T.dot(Y), operator.T.dot(Y)) +@pytest.mark.parametrize('shape', [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) +@pytest.mark.parametrize('uniform_weights', [True, False]) +def test_compute_gram(shape, uniform_weights): + rng = np.random.RandomState(0) + X = 
rng.randn(*shape) + if uniform_weights: + sw = np.ones(X.shape[0]) + else: + sw = rng.chisquare(1, shape[0]) + sqrt_sw = np.sqrt(sw) + X_mean = np.average(X, axis=0, weights=sw) + X_centered = (X - X_mean) * sqrt_sw[:, None] + true_gram = X_centered.dot(X_centered.T) + X_sparse = sp.csr_matrix(X * sqrt_sw[:, None]) + gcv = _RidgeGCV(fit_intercept=True) + computed_gram, computed_mean = gcv._compute_gram(X_sparse, sqrt_sw) + assert np.allclose(X_mean, computed_mean) + assert np.allclose(true_gram, computed_gram) + + +@pytest.mark.parametrize('shape', [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) +@pytest.mark.parametrize('uniform_weights', [True, False]) +def test_compute_covariance(shape, uniform_weights): + rng = np.random.RandomState(0) + X = rng.randn(*shape) + if uniform_weights: + sw = np.ones(X.shape[0]) + else: + sw = rng.chisquare(1, shape[0]) + sqrt_sw = np.sqrt(sw) + X_mean = np.average(X, axis=0, weights=sw) + X_centered = (X - X_mean) * sqrt_sw[:, None] + true_covariance = X_centered.T.dot(X_centered) + X_sparse = sp.csr_matrix(X * sqrt_sw[:, None]) + gcv = _RidgeGCV(fit_intercept=True) + computed_cov, computed_mean = gcv._compute_covariance(X_sparse, sqrt_sw) + assert np.allclose(X_mean, computed_mean) + assert np.allclose(true_covariance, computed_cov) + + def _make_sparse_offset_regression( n_samples=100, n_features=100, proportion_nonzero=.5, n_informative=10, n_targets=1, bias=13., X_offset=30., From aec69bd6c459be5a23ac01ae0502a7f78e43e56d Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Mon, 6 May 2019 12:52:35 +0200 Subject: [PATCH 101/103] assert np.allclose -> assert_allclose --- sklearn/linear_model/tests/test_ridge.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py index fc5fa1357b3a1..1cd386ee5d618 100644 --- a/sklearn/linear_model/tests/test_ridge.py +++ b/sklearn/linear_model/tests/test_ridge.py @@ -9,7 +9,6 @@ from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal -from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_greater @@ -346,8 +345,8 @@ def test_compute_gram(shape, uniform_weights): X_sparse = sp.csr_matrix(X * sqrt_sw[:, None]) gcv = _RidgeGCV(fit_intercept=True) computed_gram, computed_mean = gcv._compute_gram(X_sparse, sqrt_sw) - assert np.allclose(X_mean, computed_mean) - assert np.allclose(true_gram, computed_gram) + assert_allclose(X_mean, computed_mean) + assert_allclose(true_gram, computed_gram) @pytest.mark.parametrize('shape', [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)]) @@ -366,8 +365,8 @@ def test_compute_covariance(shape, uniform_weights): X_sparse = sp.csr_matrix(X * sqrt_sw[:, None]) gcv = _RidgeGCV(fit_intercept=True) computed_cov, computed_mean = gcv._compute_covariance(X_sparse, sqrt_sw) - assert np.allclose(X_mean, computed_mean) - assert np.allclose(true_covariance, computed_cov) + assert_allclose(X_mean, computed_mean) + assert_allclose(true_covariance, computed_cov) def _make_sparse_offset_regression( From 26b0efc5fe22702b963ffb26d6027c5080376399 Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 7 May 2019 13:46:33 +0200 Subject: [PATCH 102/103] keep X in float64 --- doc/whats_new/v0.21.rst | 2 +- sklearn/linear_model/ridge.py | 29 ++++++++++++++++------------- 2 files 
changed, 17 insertions(+), 14 deletions(-) diff --git a/doc/whats_new/v0.21.rst b/doc/whats_new/v0.21.rst index ea43a95d11eb8..38ca0a96040ea 100644 --- a/doc/whats_new/v0.21.rst +++ b/doc/whats_new/v0.21.rst @@ -386,7 +386,7 @@ Support for Python 3.4 and below has been officially dropped. :mod:`sklearn.linear_model` ........................... -- |Enhancement| :mod:`linear_model.ridge` now preserves ``float32`` and +- |Enhancement| :class:`linear_model.Ridge` now preserves ``float32`` and ``float64`` dtypes. :issues:`8769` and :issues:`11000` by :user:`Guillaume Lemaitre `, and :user:`Joan Massich ` diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index c367e64713907..efcfc641b3ac9 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1009,7 +1009,7 @@ def __init__(self, X, X_mean, sqrt_sw): def _matvec(self, v): v = v.ravel() n_features = self.shape[0] - res = np.empty(n_features) + res = np.empty(n_features, dtype=self.X.dtype) res[:-1] = ( safe_sparse_dot(self.X.T, v, dense_output=True) - (self.X_mean * self.sqrt_sw.dot(v)) @@ -1019,7 +1019,7 @@ def _matvec(self, v): def _matmat(self, v): n_features = self.shape[0] - res = np.empty((n_features, v.shape[1])) + res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype) res[:-1] = ( safe_sparse_dot(self.X.T, v, dense_output=True) - self.X_mean[:, None] * self.sqrt_sw.dot(v) @@ -1201,7 +1201,7 @@ def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw): intercept_col = sqrt_sw scale = sqrt_sw batch_size = X.shape[1] - diag = np.empty(X.shape[0]) + diag = np.empty(X.shape[0], dtype=X.dtype) for start in range(0, X.shape[0], batch_size): batch = slice(start, min(X.shape[0], start + batch_size), 1) X_batch = np.empty( @@ -1256,7 +1256,7 @@ def _solve_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): def _svd_decompose_covariance(self, X, y, sqrt_sw): """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape - cov = np.empty((n_features + 1, n_features + 1)) + cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype) cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw) if not self.fit_intercept: cov = cov[:-1, :-1] @@ -1377,10 +1377,10 @@ def fit(self, X, y, sample_weight=None): Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] - Training data + Training data. Will be cast to float64 if necessary y : array-like, shape = [n_samples] or [n_samples, n_targets] - Target values. Will be cast to X's dtype if necessary + Target values. Will be cast to float64 if necessary sample_weight : float or array-like of shape [n_samples] Sample weight @@ -1389,14 +1389,15 @@ def fit(self, X, y, sample_weight=None): ------- self : object """ + X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], + dtype=[np.float64], + multi_output=True, y_numeric=True) + if np.any(self.alphas <= 0): raise ValueError( "alphas must be positive. 
Got {} containing some " "negative or null value instead.".format(self.alphas)) - X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], - dtype=[np.float64, np.float32], - multi_output=True, y_numeric=True) if sample_weight is not None and not isinstance(sample_weight, float): sample_weight = check_array(sample_weight, ensure_2d=False, dtype=X.dtype) @@ -1412,7 +1413,6 @@ def fit(self, X, y, sample_weight=None): decompose = self._eigen_decompose_gram solve = self._solve_gram elif gcv_mode == 'svd': - # assert n_samples >= n_features if sparse.issparse(X): decompose = self._svd_decompose_covariance solve = self._solve_covariance_sparse @@ -1430,7 +1430,8 @@ def fit(self, X, y, sample_weight=None): error = scorer is None n_y = 1 if len(y.shape) == 1 else y.shape[1] - cv_values = np.zeros((n_samples * n_y, len(self.alphas))) + cv_values = np.zeros((n_samples * n_y, len(self.alphas)), + dtype=X.dtype) C = [] X_mean, *decomposition = decompose(X, y, sqrt_sw) for i, alpha in enumerate(self.alphas): @@ -1495,7 +1496,8 @@ def fit(self, X, y, sample_weight=None): Parameters ---------- X : array-like, shape = [n_samples, n_features] - Training data + Training data. If using GCV, will be cast to float64 + if necessary. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. Will be cast to X's dtype if necessary @@ -1782,7 +1784,8 @@ def fit(self, X, y, sample_weight=None): ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples - and n_features is the number of features. + and n_features is the number of features. When using GCV, + will be cast to float64 if necessary. y : array-like, shape (n_samples,) Target values. Will be cast to X's dtype if necessary From b0f7f4a4fbb359dccae0b293af01c603a79bbaae Mon Sep 17 00:00:00 2001 From: Jerome Dockes Date: Tue, 7 May 2019 13:52:39 +0200 Subject: [PATCH 103/103] more explicit decompose / solve private function names now _{decomposition method used}_decompose_{decomposed matrix} and _solve_{provided decomposition}_{matrix whose decomposition is provided} e.g. _eigen_decompose_covariance, _solve_eigen_covariance --- sklearn/linear_model/ridge.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py index efcfc641b3ac9..0e54126e52c33 100644 --- a/sklearn/linear_model/ridge.py +++ b/sklearn/linear_model/ridge.py @@ -1230,7 +1230,7 @@ def _eigen_decompose_gram(self, X, y, sqrt_sw): QT_y = np.dot(Q.T, y) return X_mean, v, Q, QT_y - def _solve_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): + def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) Used when we have a decomposition of X.X^T (n_features >= n_samples). 
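Editor's note (not part of the patches): `_solve_eigen_gram` and the `(c / G_diag) ** 2` / `y - c / G_diag` expressions used in `fit` rest on the exact leave-one-out identity for ridge: with G = (K + alpha*I)^-1 and c = G.dot(y), the LOO residual for sample i is c[i] / G[i, i]. A standalone check for the unweighted, no-intercept case, using only NumPy and the public Ridge estimator (nothing from the private API):

    import numpy as np
    from sklearn.linear_model import Ridge

    rng = np.random.RandomState(0)
    n_samples, n_features, alpha = 20, 30, 1.0
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)

    K = X.dot(X.T)                                    # Gram matrix X.X^T
    G = np.linalg.inv(K + alpha * np.eye(n_samples))  # (K + alpha*I)^-1
    c = G.dot(y)                                      # dual coefficients
    loo_residuals = c / np.diag(G)                    # shortcut: no refitting

    # brute-force leave-one-out for comparison
    brute = np.empty(n_samples)
    for i in range(n_samples):
        mask = np.arange(n_samples) != i
        model = Ridge(alpha=alpha, fit_intercept=False).fit(X[mask], y[mask])
        brute[i] = y[i] - model.predict(X[i:i + 1])[0]

    assert np.allclose(loo_residuals, brute)
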
@@ -1253,7 +1253,7 @@ def _solve_gram(self, alpha, y, sqrt_sw, X_mean, v, Q, QT_y): G_diag = G_diag[:, np.newaxis] return G_diag, c - def _svd_decompose_covariance(self, X, y, sqrt_sw): + def _eigen_decompose_covariance(self, X, y, sqrt_sw): """Eigendecomposition of X^T.X, used when n_samples > n_features.""" n_samples, n_features = X.shape cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype) @@ -1276,7 +1276,7 @@ def _svd_decompose_covariance(self, X, y, sqrt_sw): V = V[:, nullspace_dim:] return X_mean, s, V, X - def _solve_covariance_sparse_no_intercept( + def _solve_eigen_covariance_no_intercept( self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1293,7 +1293,7 @@ def _solve_covariance_sparse_no_intercept( hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _solve_covariance_sparse_intercept( + def _solve_eigen_covariance_intercept( self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1323,7 +1323,7 @@ def _solve_covariance_sparse_intercept( hat_diag = hat_diag[:, np.newaxis] return (1 - hat_diag) / alpha, (y - y_hat) / alpha - def _solve_covariance_sparse( + def _solve_eigen_covariance( self, alpha, y, sqrt_sw, X_mean, s, V, X): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1331,9 +1331,9 @@ def _solve_covariance_sparse( (n_features < n_samples and X is sparse). """ if self.fit_intercept: - return self._solve_covariance_sparse_intercept( + return self._solve_eigen_covariance_intercept( alpha, y, sqrt_sw, X_mean, s, V, X) - return self._solve_covariance_sparse_no_intercept( + return self._solve_eigen_covariance_no_intercept( alpha, y, sqrt_sw, X_mean, s, V, X) def _svd_decompose_design_matrix(self, X, y, sqrt_sw): @@ -1350,7 +1350,7 @@ def _svd_decompose_design_matrix(self, X, y, sqrt_sw): UT_y = np.dot(U.T, y) return X_mean, v, U, UT_y - def _solve_covariance_dense( + def _solve_svd_design_matrix( self, alpha, y, sqrt_sw, X_mean, v, U, UT_y): """Compute dual coefficients and diagonal of (Identity - Hat_matrix) @@ -1411,14 +1411,14 @@ def fit(self, X, y, sample_weight=None): if gcv_mode == 'eigen': decompose = self._eigen_decompose_gram - solve = self._solve_gram + solve = self._solve_eigen_gram elif gcv_mode == 'svd': if sparse.issparse(X): - decompose = self._svd_decompose_covariance - solve = self._solve_covariance_sparse + decompose = self._eigen_decompose_covariance + solve = self._solve_eigen_covariance else: decompose = self._svd_decompose_design_matrix - solve = self._solve_covariance_dense + solve = self._solve_svd_design_matrix if sample_weight is not None: X, y = _rescale_data(X, y, sample_weight)
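
Editor's note (not part of the patches): to close, a self-contained sketch of the quantity that PATCH 100/103 tests `_compute_gram` against, the Gram matrix of the weighted, centered data, computed here from sparse products and rank-one corrections only, without densifying X. The expansion mirrors the `X_mX` / `X_mX_m` terms of PATCH 098/099; variable names are illustrative.

    import numpy as np
    from scipy import sparse

    rng = np.random.RandomState(0)
    n_samples, n_features = 9, 4
    X_dense = rng.randn(n_samples, n_features) * rng.binomial(1, .5, (n_samples, n_features))
    sw = rng.chisquare(1, n_samples)
    sqrt_sw = np.sqrt(sw)

    # what _RidgeGCV works with after _rescale_data: rows already scaled by sqrt(sample_weight)
    Xw = sparse.csr_matrix(X_dense * sqrt_sw[:, None])

    # weighted mean of the unscaled rows, recovered from the scaled sparse matrix
    X_mean = np.asarray(Xw.T.dot(sqrt_sw)).ravel() / sw.sum()

    # centered Gram from sparse products plus rank-one corrections
    X_mX = np.outer(sqrt_sw, np.asarray(Xw.dot(X_mean)).ravel())
    gram = (Xw.dot(Xw.T).toarray() - X_mX - X_mX.T
            + np.dot(X_mean, X_mean) * np.outer(sqrt_sw, sqrt_sw))

    # reference: densify, center with the weighted mean, rescale, take the Gram
    X_centered = (X_dense - X_mean) * sqrt_sw[:, None]
    assert np.allclose(gram, X_centered.dot(X_centered.T))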