diff --git a/sklearn/decomposition/incremental_pca.py b/sklearn/decomposition/incremental_pca.py
index 13e51090dd82e..39ed37293bb2d 100644
--- a/sklearn/decomposition/incremental_pca.py
+++ b/sklearn/decomposition/incremental_pca.py
@@ -243,9 +243,10 @@ def partial_fit(self, X, y=None, check_input=True):
 
         # Update stats - they are 0 if this is the fisrt step
         col_mean, col_var, n_total_samples = \
-            _incremental_mean_and_var(X, last_mean=self.mean_,
-                                      last_variance=self.var_,
-                                      last_sample_count=self.n_samples_seen_)
+            _incremental_mean_and_var(
+                X, last_mean=self.mean_, last_variance=self.var_,
+                last_sample_count=self.n_samples_seen_ * np.ones(n_features))
+        n_total_samples = n_total_samples[0]
 
         # Whitening
         if self.n_samples_seen_ == 0:
diff --git a/sklearn/preprocessing/data.py b/sklearn/preprocessing/data.py
index bbd2fae10c0ec..5b3b38c95c453 100644
--- a/sklearn/preprocessing/data.py
+++ b/sklearn/preprocessing/data.py
@@ -619,13 +619,19 @@ def partial_fit(self, X, y=None):
             Ignored
         """
         X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
-                        warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
+                        warn_on_dtype=True, estimator=self,
+                        force_all_finite='allow-nan', dtype=FLOAT_DTYPES)
 
         # Even in the case of `with_mean=False`, we update the mean anyway
         # This is needed for the incremental computation of the var
         # See incr_mean_variance_axis and _incremental_mean_variance_axis
         if sparse.issparse(X):
+            # FIXME: remove this check statement
+            X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
+                            warn_on_dtype=True, estimator=self,
+                            dtype=FLOAT_DTYPES)
+
             if self.with_mean:
                 raise ValueError(
                     "Cannot center sparse matrices: pass `with_mean=False` "
@@ -646,6 +652,9 @@ def partial_fit(self, X, y=None):
             self.mean_ = None
             self.var_ = None
         else:
+            X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
+                            warn_on_dtype=True, estimator=self,
+                            dtype=FLOAT_DTYPES)
             # First pass
             if not hasattr(self, 'n_samples_seen_'):
                 self.mean_ = .0
@@ -656,8 +665,9 @@ def partial_fit(self, X, y=None):
                 self.var_ = None
 
             self.mean_, self.var_, self.n_samples_seen_ = \
-                _incremental_mean_and_var(X, self.mean_, self.var_,
-                                          self.n_samples_seen_)
+                _incremental_mean_and_var(
+                    X, self.mean_, self.var_,
+                    self.n_samples_seen_ * np.ones(X.shape[1]))
 
         if self.with_std:
             self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
diff --git a/sklearn/preprocessing/tests/test_data.py b/sklearn/preprocessing/tests/test_data.py
index f4c3d3d571772..2b6d3c3d16890 100644
--- a/sklearn/preprocessing/tests/test_data.py
+++ b/sklearn/preprocessing/tests/test_data.py
@@ -203,7 +203,7 @@ def test_standard_scaler_1d():
                                       np.zeros_like(n_features))
         assert_array_almost_equal(X_scaled.mean(axis=0), .0)
         assert_array_almost_equal(X_scaled.std(axis=0), 1.)
-        assert_equal(scaler.n_samples_seen_, X.shape[0])
+        assert_array_equal(scaler.n_samples_seen_, X.shape[0])
 
         # check inverse transform
         X_scaled_back = scaler.inverse_transform(X_scaled)
@@ -283,7 +283,7 @@ def test_scaler_2d_arrays():
     scaler = StandardScaler()
     X_scaled = scaler.fit(X).transform(X, copy=True)
     assert_false(np.any(np.isnan(X_scaled)))
-    assert_equal(scaler.n_samples_seen_, n_samples)
+    assert_array_equal(scaler.n_samples_seen_, n_samples)
 
     assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
     assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
@@ -399,7 +399,8 @@ def test_standard_scaler_partial_fit():
 
         assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
         assert_equal(scaler_batch.var_, scaler_incr.var_)  # Nones
-        assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
+        assert_array_almost_equal(
+            scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
 
         # Test std after 1 step
         batch0 = slice(0, chunk_size)
@@ -423,10 +424,11 @@ def test_standard_scaler_partial_fit():
             assert_correct_incr(i, batch_start=batch.start,
                                 batch_stop=batch.stop, n=n,
                                 chunk_size=chunk_size,
-                                n_samples_seen=scaler_incr.n_samples_seen_)
+                                n_samples_seen=scaler_incr.n_samples_seen_[0])
 
     assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
-    assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
+    assert_array_almost_equal(scaler_batch.n_samples_seen_,
+                              scaler_incr.n_samples_seen_)
 
 
 def test_standard_scaler_partial_fit_numerical_stability():
@@ -515,7 +517,7 @@ def test_standard_scaler_trasform_with_partial_fit():
         assert_array_less(zero, scaler_incr.var_ + epsilon)  # as less or equal
         assert_array_less(zero, scaler_incr.scale_ + epsilon)
         # (i+1) because the Scaler has been already fitted
-        assert_equal((i + 1), scaler_incr.n_samples_seen_)
+        assert_array_equal((i + 1), scaler_incr.n_samples_seen_)
 
 
 def test_min_max_scaler_iris():
diff --git a/sklearn/utils/estimator_checks.py b/sklearn/utils/estimator_checks.py
index b079c37f7bea2..9add53f720b46 100644
--- a/sklearn/utils/estimator_checks.py
+++ b/sklearn/utils/estimator_checks.py
@@ -70,6 +70,7 @@
                 'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
                 'RANSACRegressor', 'RadiusNeighborsRegressor',
                 'RandomForestRegressor', 'Ridge', 'RidgeCV']
+ALLOW_NAN = ['StandardScaler']
 
 
 def _yield_non_meta_checks(name, estimator):
@@ -1024,6 +1025,8 @@ def check_estimators_nan_inf(name, estimator_orig):
     error_string_transform = ("Estimator doesn't check for NaN and inf in"
                               " transform.")
     for X_train in [X_train_nan, X_train_inf]:
+        if np.any(np.isnan(X_train)) and name in ALLOW_NAN:
+            continue
         # catch deprecation warnings
         with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
             estimator = clone(estimator_orig)
diff --git a/sklearn/utils/extmath.py b/sklearn/utils/extmath.py
index e95ceb57497ae..7b96e4d0a1f10 100644
--- a/sklearn/utils/extmath.py
+++ b/sklearn/utils/extmath.py
@@ -643,7 +643,7 @@ def make_nonnegative(X, min_value=0):
 
 
 def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
-                              last_sample_count=0):
+                              last_sample_count=0, ignore_nan=False):
     """Calculate mean update and a Youngs and Cramer variance update.
 
     last_mean and last_variance are statistics computed at the last step by the
@@ -664,7 +664,7 @@ def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
 
     last_variance : array-like, shape: (n_features,)
 
-    last_sample_count : int
+    last_sample_count : array-like, shape: (n_features,)
 
     Returns
     -------
@@ -673,7 +673,7 @@ def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
     updated_variance : array, shape (n_features,)
         If None, only mean is computed
 
-    updated_sample_count : int
+    updated_sample_count : array, shape (n_features,)
 
     References
     ----------
@@ -689,28 +689,41 @@ def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
     # new = the current increment
    # updated = the aggregated stats
     last_sum = last_mean * last_sample_count
-    new_sum = X.sum(axis=0)
+    sum_func = np.nansum if ignore_nan else np.sum
+    new_sum = sum_func(X, axis=0)
+    new_sum[np.isnan(new_sum)] = 0
 
-    new_sample_count = X.shape[0]
+    new_sample_count = np.sum(~np.isnan(X), axis=0)
     updated_sample_count = last_sample_count + new_sample_count
 
-    updated_mean = (last_sum + new_sum) / updated_sample_count
+    with np.errstate(divide="ignore"):  # as division by 0 might happen
+        updated_mean = (last_sum + new_sum) / updated_sample_count
+    updated_mean[np.logical_not(updated_sample_count)] = 0
 
     if last_variance is None:
         updated_variance = None
     else:
-        new_unnormalized_variance = X.var(axis=0) * new_sample_count
-        if last_sample_count == 0:  # Avoid division by 0
-            updated_unnormalized_variance = new_unnormalized_variance
-        else:
+        var_func = np.nanvar if ignore_nan else np.var
+        new_unnormalized_variance = var_func(X, axis=0)
+        new_unnormalized_variance[~np.isfinite(new_unnormalized_variance)] = 0
+        new_unnormalized_variance *= new_sample_count
+        last_unnormalized_variance = last_variance * last_sample_count
+        with np.errstate(divide="ignore"):
             last_over_new_count = last_sample_count / new_sample_count
-            last_unnormalized_variance = last_variance * last_sample_count
             updated_unnormalized_variance = (
-                last_unnormalized_variance +
-                new_unnormalized_variance +
                 last_over_new_count / updated_sample_count *
                 (last_sum / last_over_new_count - new_sum) ** 2)
-        updated_variance = updated_unnormalized_variance / updated_sample_count
+        # updated_unnormalized_variance can be both NaN or Inf
+        updated_unnormalized_variance[~np.isfinite(
+            updated_unnormalized_variance)] = 0
+        updated_unnormalized_variance += (last_unnormalized_variance +
+                                          new_unnormalized_variance)
+
+        with np.errstate(divide="ignore"):
+            updated_variance = (updated_unnormalized_variance /
+                                updated_sample_count)
+        # As division by zero might happen
+        updated_variance[np.logical_not(updated_sample_count)] = 0
 
     return updated_mean, updated_variance, updated_sample_count
diff --git a/sklearn/utils/sparsefuncs_fast.pyx b/sklearn/utils/sparsefuncs_fast.pyx
index 481f2137fab77..0bd2f5150da9e 100644
--- a/sklearn/utils/sparsefuncs_fast.pyx
+++ b/sklearn/utils/sparsefuncs_fast.pyx
@@ -15,6 +15,8 @@ import numpy as np
 import scipy.sparse as sp
 cimport cython
 from cython cimport floating
+from numpy.math cimport isnan
+import warnings
 
 np.import_array()
 
@@ -54,7 +56,7 @@ def _csr_row_norms(np.ndarray[floating, ndim=1, mode="c"] X_data,
     return norms
 
 
-def csr_mean_variance_axis0(X):
+def csr_mean_variance_axis0(X, ignore_nan=True):
     """Compute mean and variance along axis 0 on a CSR matrix
 
     Parameters
     ----------
@@ -74,12 +76,13 @@
     """
     if X.dtype != np.float32:
         X = X.astype(np.float64)
-    return _csr_mean_variance_axis0(X.data, X.shape, X.indices)
+    return _csr_mean_variance_axis0(X.data, X.shape, X.indices, ignore_nan)
 
 
 def _csr_mean_variance_axis0(np.ndarray[floating, ndim=1, mode="c"] X_data,
                              shape,
-                             np.ndarray[int, ndim=1] X_indices):
+                             np.ndarray[int, ndim=1] X_indices,
+                             ignore_nan=True):
     # Implement the function here since variables using fused types
     # cannot be declared directly and can only be passed as function arguments
     cdef unsigned int n_samples = shape[0]
@@ -94,6 +97,8 @@ def _csr_mean_variance_axis0(np.ndarray[floating, ndim=1, mode="c"] X_data,
     cdef np.ndarray[floating, ndim=1] means
     # variances[j] contains the variance of feature j
     cdef np.ndarray[floating, ndim=1] variances
+    # n_samples_feat[j] contains the n_samples of feature j
+    cdef np.ndarray[floating, ndim=1] n_samples_feat
 
     if floating is float:
         dtype = np.float32
@@ -102,6 +107,7 @@ def _csr_mean_variance_axis0(np.ndarray[floating, ndim=1, mode="c"] X_data,
 
     means = np.zeros(n_features, dtype=dtype)
     variances = np.zeros_like(means, dtype=dtype)
+    n_samples_feat = np.ones(n_features, dtype=dtype) * n_samples
 
     # counts[j] contains the number of samples where feature j is non-zero
     cdef np.ndarray[int, ndim=1] counts = np.zeros(n_features,
@@ -109,24 +115,36 @@
     for i in xrange(non_zero):
         col_ind = X_indices[i]
-        means[col_ind] += X_data[i]
+        x_i = X_data[i]
+        if ignore_nan and isnan(x_i):
+            n_samples_feat[col_ind] -= 1
+            continue
+        means[col_ind] += x_i
 
-    means /= n_samples
+    with np.errstate(divide="ignore"):
+        # as division by 0 might happen
+        means /= n_samples_feat
+    means[np.logical_not(n_samples_feat)] = 0
 
     for i in xrange(non_zero):
         col_ind = X_indices[i]
-        diff = X_data[i] - means[col_ind]
+        x_i = X_data[i]
+        if ignore_nan and isnan(x_i):
+            continue
+        diff = x_i - means[col_ind]
         variances[col_ind] += diff * diff
         counts[col_ind] += 1
 
-    for i in xrange(n_features):
-        variances[i] += (n_samples - counts[i]) * means[i] ** 2
-        variances[i] /= n_samples
+    variances += (n_samples_feat - counts) * means ** 2
+    with np.errstate(divide="ignore"):
+        # as division by 0 might happen
+        variances /= n_samples_feat
+    variances[np.logical_not(n_samples_feat)] = 0
 
     return means, variances
 
 
-def csc_mean_variance_axis0(X):
+def csc_mean_variance_axis0(X, ignore_nan=True):
     """Compute mean and variance along axis 0 on a CSC matrix
 
     Parameters
     ----------
@@ -146,13 +164,14 @@
     """
     if X.dtype != np.float32:
         X = X.astype(np.float64)
-    return _csc_mean_variance_axis0(X.data, X.shape, X.indices, X.indptr)
+    return _csc_mean_variance_axis0(X.data, X.shape, X.indices, X.indptr,
+                                    ignore_nan)
 
 
 def _csc_mean_variance_axis0(np.ndarray[floating, ndim=1] X_data,
                              shape,
                              np.ndarray[int, ndim=1] X_indices,
-                             np.ndarray[int, ndim=1] X_indptr):
+                             np.ndarray[int, ndim=1] X_indptr, ignore_nan=True):
     # Implement the function here since variables using fused types
     # cannot be declared directly and can only be passed as function arguments
     cdef unsigned int n_samples = shape[0]
@@ -164,6 +183,7 @@ def _csc_mean_variance_axis0(np.ndarray[floating, ndim=1] X_data,
     cdef unsigned int startptr
     cdef unsigned int endptr
     cdef floating diff
+    cdef floating n_samples_feat
 
     # means[j] contains the mean of feature j
     cdef np.ndarray[floating, ndim=1] means
@@ -182,17 +202,32 @@ def _csc_mean_variance_axis0(np.ndarray[floating, ndim=1] X_data,
         startptr = X_indptr[i]
         endptr = X_indptr[i + 1]
         counts = endptr - startptr
+        n_samples_feat = n_samples
 
         for j in xrange(startptr, endptr):
-            means[i] += X_data[j]
-        means[i] /= n_samples
+            x_j = X_data[j]
+            if ignore_nan and isnan(x_j):
+                n_samples_feat -= 1
+                continue
+            means[i] += x_j
+
+        if n_samples_feat != 0:
+            means[i] /= n_samples_feat
+        else:
+            means[i] = 0
 
         for j in xrange(startptr, endptr):
-            diff = X_data[j] - means[i]
+            x_j = X_data[j]
+            if ignore_nan and isnan(x_j):
+                continue
+            diff = x_j - means[i]
             variances[i] += diff * diff
 
         variances[i] += (n_samples - counts) * means[i] * means[i]
-        variances[i] /= n_samples
+        if n_samples_feat != 0:
+            variances[i] /= n_samples_feat
+        else:
+            variances[i] = 0
 
     return means, variances
diff --git a/sklearn/utils/tests/test_extmath.py b/sklearn/utils/tests/test_extmath.py
index f53b814c70084..96d4fd9f6b36c 100644
--- a/sklearn/utils/tests/test_extmath.py
+++ b/sklearn/utils/tests/test_extmath.py
@@ -5,6 +5,7 @@
 # License: BSD 3 clause
 
 import numpy as np
+
 from scipy import sparse
 from scipy import linalg
 from scipy import stats
@@ -467,6 +468,31 @@ def naive_log_logistic(x):
     assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
 
 
+def test_incremental_mean_and_var_nan():
+    # Test mean and variance when an array has floating NaN values
+
+    X1 = np.array([[600, 470, 170, 430, np.nan],
+                   [600, np.nan, 170, 430, 300],
+                   [np.nan, np.nan, np.nan, np.nan, np.nan],
+                   [600, 470, 170, 430, 300]])
+
+    X2 = np.array([[np.nan, np.nan, np.nan, np.nan, np.nan],
+                   [600, 470, 170, 430, 300]])
+    A = np.concatenate((X1, X2,), axis=0)
+    X_means, X_variances, X_count = _incremental_mean_and_var(
+        X1, np.array([0, 0, 0, 0, 0]), np.array([0, 0, 0, 0, 0]),
+        np.array([0, 0, 0, 0, 0]), ignore_nan=True)
+    A_means = np.nanmean(A, axis=0)
+    A_variances = np.nanvar(A, axis=0)
+    A_count = np.array([4, 3, 4, 4, 3])
+
+    final_means, final_variances, final_count = _incremental_mean_and_var(
+        X2, X_means, X_variances, X_count, ignore_nan=True)
+    assert_almost_equal(A_means, final_means)
+    assert_almost_equal(A_variances, final_variances)
+    assert_almost_equal(A_count, final_count)
+
+
 def test_incremental_variance_update_formulas():
     # Test Youngs and Cramer incremental variance formulas.
     # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
@@ -483,7 +509,7 @@ def test_incremental_variance_update_formulas():
     old_sample_count = X1.shape[0]
     final_means, final_variances, final_count = \
         _incremental_mean_and_var(X2, old_means, old_variances,
-                                  old_sample_count)
+                                  old_sample_count * np.ones(X2.shape[1]))
     assert_almost_equal(final_means, A.mean(axis=0), 6)
     assert_almost_equal(final_variances, A.var(axis=0), 6)
     assert_almost_equal(final_count, A.shape[0])
@@ -562,7 +588,8 @@ def naive_mean_variance_update(x, last_mean, last_variance,
     for i in range(A1.shape[0]):
         mean, var, n = \
             _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
-                                      mean, var, n)
+                                      mean, var, n * np.ones(A1.shape[1]))
+        n = n[0]
     assert_equal(n, A.shape[0])
     assert_array_almost_equal(A.mean(axis=0), mean)
     assert_greater(tol, np.abs(stable_var(A) - var).max())
@@ -589,9 +616,10 @@ def test_incremental_variance_ddof():
         else:
             result = _incremental_mean_and_var(
                 batch, incremental_means, incremental_variances,
-                sample_count)
+                sample_count * np.ones(batch.shape[1]))
             (incremental_means, incremental_variances,
              incremental_count) = result
+            incremental_count = incremental_count[0]
             sample_count += batch.shape[0]
 
         calculated_means = np.mean(X[:j], axis=0)
diff --git a/sklearn/utils/tests/test_sparsefuncs.py b/sklearn/utils/tests/test_sparsefuncs.py
index f2b35e7459833..54516e797d19a 100644
--- a/sklearn/utils/tests/test_sparsefuncs.py
+++ b/sklearn/utils/tests/test_sparsefuncs.py
@@ -17,10 +17,26 @@
                                        count_nonzero, csc_median_axis_0)
 from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
                                             inplace_csr_row_normalize_l1,
-                                            inplace_csr_row_normalize_l2)
+                                            inplace_csr_row_normalize_l2,
+                                            csr_mean_variance_axis0,
+                                            csc_mean_variance_axis0)
 from sklearn.utils.testing import assert_raises
 
 
+def test_csr_csc_mean_axis0():
+    X = np.array([[600, np.nan, 0, 0, np.nan],
+                  [np.nan, 0, np.nan, np.nan, 0],
+                  [600, np.nan, 0, 0, np.nan]])
+    X_csr = sp.csr_matrix(X)
+    X_means, X_variance = csr_mean_variance_axis0(X_csr)
+    assert_array_almost_equal(X_means, np.array([600, 0, 0, 0, 0]))
+    assert_array_almost_equal(X_variance, np.array([0, 0, 0, 0, 0]))
+    X_csc = sp.csc_matrix(X)
+    X_means, X_variance = csc_mean_variance_axis0(X_csc)
+    assert_array_almost_equal(X_means, np.array([600, 0, 0, 0, 0]))
+    assert_array_almost_equal(X_variance, np.array([0, 0, 0, 0, 0]))
+
+
 def test_mean_variance_axis0():
     X, _ = make_classification(5, 4, random_state=0)
     # Sparsify the array a little bit
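
Note (not part of the patch): per feature, the ignore_nan=True path of _incremental_mean_and_var is the usual Youngs-Cramer / Chan et al. merge of (count, mean, M2) statistics applied to NaN-stripped batch statistics; the patch's correction term last_over_new_count / updated_sample_count * (last_sum / last_over_new_count - new_sum) ** 2 is algebraically delta ** 2 * n_old * n_new / n_total, just rewritten around quantities already at hand. A minimal self-contained NumPy sketch of that merge (merge_nan_stats and its (count, mean, M2) representation are illustrative only, not sklearn's API):

import numpy as np


def merge_nan_stats(last_count, last_mean, last_m2, X):
    """Merge running per-feature (count, mean, M2) with a new batch,
    skipping NaN entries.  Illustrative sketch only; the name and the
    (count, mean, M2) representation are not sklearn's API."""
    mask = ~np.isnan(X)
    new_count = mask.sum(axis=0)
    safe_new = np.maximum(new_count, 1)        # avoid 0/0 on all-NaN columns
    new_sum = np.where(mask, X, 0.0).sum(axis=0)
    new_mean = new_sum / safe_new
    new_m2 = (np.where(mask, X - new_mean, 0.0) ** 2).sum(axis=0)

    count = last_count + new_count
    safe_count = np.maximum(count, 1)
    delta = new_mean - last_mean
    mean = last_mean + delta * new_count / safe_count
    # Chan et al. pairwise merge: delta ** 2 * n_old * n_new / n_total is the
    # same quantity as the patch's last_over_new_count-based correction term.
    m2 = last_m2 + new_m2 + delta ** 2 * last_count * new_count / safe_count
    return count, mean, m2                     # variance = m2 / count


# Two NaN-containing batches reproduce np.nanmean / np.nanvar of their
# concatenation, which is what test_incremental_mean_and_var_nan checks
# for _incremental_mean_and_var(..., ignore_nan=True).
X1 = np.array([[1.0, np.nan], [2.0, 3.0]])
X2 = np.array([[np.nan, 5.0], [4.0, 7.0]])
c, m, m2 = merge_nan_stats(np.zeros(2), np.zeros(2), np.zeros(2), X1)
c, m, m2 = merge_nan_stats(c, m, m2, X2)
assert np.allclose(m, np.nanmean(np.vstack([X1, X2]), axis=0))
assert np.allclose(m2 / c, np.nanvar(np.vstack([X1, X2]), axis=0))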
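
The Cython kernels in sparsefuncs_fast.pyx apply the same idea to sparse input: each stored NaN removes one sample from that feature's effective count (n_samples_feat) and is skipped in both accumulation passes, while implicit zeros still count as samples. A rough dense NumPy equivalent (csr_nan_mean_var is a made-up name for illustration; the real kernels work directly on X.data / X.indices without densifying):

import numpy as np
import scipy.sparse as sp


def csr_nan_mean_var(X_sparse):
    """Dense NumPy sketch of the NaN-skipping column mean/variance.
    Implicit zeros count as samples; stored NaNs do not.  Not the Cython code."""
    X = X_sparse.toarray()
    nan_mask = np.isnan(X)
    n_samples_feat = X.shape[0] - nan_mask.sum(axis=0)   # per-feature count
    X_filled = np.where(nan_mask, 0.0, X)
    with np.errstate(divide="ignore", invalid="ignore"):
        means = X_filled.sum(axis=0) / n_samples_feat
        variances = np.where(nan_mask, 0.0,
                             (X_filled - means) ** 2).sum(axis=0)
        variances /= n_samples_feat
    means[n_samples_feat == 0] = 0
    variances[n_samples_feat == 0] = 0
    return means, variances


X = np.array([[600, np.nan, 0, 0, np.nan],
              [np.nan, 0, np.nan, np.nan, 0],
              [600, np.nan, 0, 0, np.nan]])
means, variances = csr_nan_mean_var(sp.csr_matrix(X))
# means -> [600., 0., 0., 0., 0.] and variances -> [0., 0., 0., 0., 0.],
# the values expected by the new test_csr_csc_mean_axis0.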
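
At the estimator level, the user-visible change (assuming this patch is applied as-is) is that StandardScaler.partial_fit no longer rejects NaN in dense input and that n_samples_seen_ becomes a per-feature array counting only non-NaN entries. Note that partial_fit does not yet pass ignore_nan=True to _incremental_mean_and_var, so the mean_ and var_ values of NaN-containing columns should not be relied on in this revision. A hypothetical smoke test:

import numpy as np
from sklearn.preprocessing import StandardScaler

# Hypothetical behaviour with this patch applied; without it, check_array
# raises "Input contains NaN" on the first partial_fit call.
X1 = np.array([[1.0, np.nan], [2.0, 3.0]])
X2 = np.array([[np.nan, 5.0], [4.0, 7.0]])

scaler = StandardScaler()
scaler.partial_fit(X1)
scaler.partial_fit(X2)

# n_samples_seen_ is now a per-feature array of non-NaN counts,
# e.g. array([3., 3.]) here, instead of the scalar 4.
print(scaler.n_samples_seen_)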