diff --git a/sklearn/cluster/tests/test_hierarchical.py b/sklearn/cluster/tests/test_hierarchical.py
index 8c4f06598bae3..ffc5a564cc7d5 100644
--- a/sklearn/cluster/tests/test_hierarchical.py
+++ b/sklearn/cluster/tests/test_hierarchical.py
@@ -4,7 +4,6 @@
 """
 # Authors: Vincent Michel, 2010, Gael Varoquaux 2012
 # License: BSD 3 clause
-import warnings
 from tempfile import mkdtemp

 import numpy as np
@@ -19,6 +18,7 @@
 from sklearn.cluster import Ward, WardAgglomeration, ward_tree
 from sklearn.cluster.hierarchical import _hc_cut
 from sklearn.feature_extraction.image import grid_to_graph
+from sklearn.utils.testing import assert_warns


 def test_structured_ward_tree():
@@ -46,14 +46,12 @@ def test_unstructured_ward_tree():
     rnd = np.random.RandomState(0)
     X = rnd.randn(50, 100)
     for this_X in (X, X[0]):
-        with warnings.catch_warnings(record=True) as warning_list:
-            warnings.simplefilter("always", UserWarning)
-            warnings.simplefilter("ignore", DeprecationWarning)
-            # With specified a number of clusters just for the sake of
-            # raising a warning and testing the warning code
-            children, n_nodes, n_leaves, parent = ward_tree(this_X.T,
-                                                            n_clusters=10)
-        assert_equal(len(warning_list), 1)
+        # With a specified number of clusters just for the sake of
+        # raising a warning and testing the warning code
+        children, n_nodes, n_leaves, parent = assert_warns(UserWarning,
+                                                           ward_tree,
+                                                           this_X.T,
+                                                           n_clusters=10)
         n_nodes = 2 * X.shape[1] - 1
         assert_equal(len(children) + n_leaves, n_nodes)
@@ -196,8 +194,7 @@ def test_connectivity_fixing_non_lil():
     m = np.array([[True, False], [False, True]])
     c = grid_to_graph(n_x=2, n_y=2, mask=m)
     w = Ward(connectivity=c)
-    with warnings.catch_warnings(record=True):
-        w.fit(x)
+    assert_warns(UserWarning, w.fit, x)


 if __name__ == '__main__':
diff --git a/sklearn/cluster/tests/test_k_means.py b/sklearn/cluster/tests/test_k_means.py
index fef61737e292d..28f73b0f2b384 100644
--- a/sklearn/cluster/tests/test_k_means.py
+++ b/sklearn/cluster/tests/test_k_means.py
@@ -1,6 +1,5 @@
 """Testing for K-means"""
 import sys
-import warnings

 import numpy as np
 from scipy import sparse as sp
@@ -12,9 +11,10 @@
 from sklearn.utils.testing import assert_almost_equal
 from sklearn.utils.testing import assert_raises
 from sklearn.utils.testing import assert_true
-
 from sklearn.utils.testing import assert_greater
 from sklearn.utils.testing import assert_less
+from sklearn.utils.testing import assert_warns
+
 from sklearn.utils.extmath import row_norms
 from sklearn.utils.fixes import unique
 from sklearn.metrics.cluster import v_measure_score
@@ -44,9 +44,8 @@ def test_kmeans_dtype():
     X = rnd.normal(size=(40, 2))
     X = (X * 10).astype(np.uint8)
     km = KMeans(n_init=1).fit(X)
-    with warnings.catch_warnings(record=True) as w:
-        assert_array_equal(km.labels_, km.predict(X))
-        assert_equal(len(w), 1)
+    pred_x = assert_warns(RuntimeWarning, km.predict, X)
+    assert_array_equal(km.labels_, pred_x)


 def test_labels_assignment_and_inertia():
@@ -287,10 +286,7 @@ def test_minibatch_init_with_large_k():
     mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
     # Check that a warning is raised, as the number clusters is larger
     # than the init_size
-    with warnings.catch_warnings(record=True) as warn_queue:
-        mb_k_means.fit(X)
-
-    assert_equal(len(warn_queue), 1)
+    assert_warns(RuntimeWarning, mb_k_means.fit, X)


 def test_minibatch_k_means_random_init_dense_array():
@@ -610,9 +606,7 @@ def test_k_means_function():
     assert_greater(inertia, 0.0)

     # check warning when centers are passed
-    with warnings.catch_warnings(record=True) as w:
-        k_means(X, n_clusters=n_clusters, init=centers)
-        assert_equal(len(w), 1)
+    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters, init=centers)

     # to many clusters desired
     assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
diff --git a/sklearn/covariance/tests/test_covariance.py b/sklearn/covariance/tests/test_covariance.py
index b5fd31e773bc3..b3e5cc9608d30 100644
--- a/sklearn/covariance/tests/test_covariance.py
+++ b/sklearn/covariance/tests/test_covariance.py
@@ -5,12 +5,12 @@
 # License: BSD 3 clause

 import numpy as np
-import warnings

 from sklearn.utils.testing import assert_almost_equal
 from sklearn.utils.testing import assert_array_almost_equal
 from sklearn.utils.testing import assert_array_equal
 from sklearn.utils.testing import assert_raises
+from sklearn.utils.testing import assert_warns

 from sklearn import datasets
 from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
@@ -59,8 +59,8 @@ def test_covariance():
     # test with one sample
     X_1sample = np.arange(5)
     cov = EmpiricalCovariance()
-    with warnings.catch_warnings(record=True):
-        cov.fit(X_1sample)
+
+    assert_warns(UserWarning, cov.fit, X_1sample)

     # test integer type
     X_integer = np.asarray([[0, 1], [1, 0]])
@@ -182,8 +182,7 @@ def test_ledoit_wolf():
     # test with one sample
     X_1sample = np.arange(5)
     lw = LedoitWolf()
-    with warnings.catch_warnings(record=True):
-        lw.fit(X_1sample)
+    assert_warns(UserWarning, lw.fit, X_1sample)

     # test shrinkage coeff on a simple data set (without saving precision)
     lw = LedoitWolf(store_precision=False)
@@ -254,8 +253,7 @@ def test_oas():
     # test with one sample
     X_1sample = np.arange(5)
     oa = OAS()
-    with warnings.catch_warnings(record=True):
-        oa.fit(X_1sample)
+    assert_warns(UserWarning, oa.fit, X_1sample)

     # test shrinkage coeff on a simple data set (without saving precision)
     oa = OAS(store_precision=False)
diff --git a/sklearn/decomposition/tests/test_factor_analysis.py b/sklearn/decomposition/tests/test_factor_analysis.py
index 5cce31358451f..05c1dc6c18009 100644
--- a/sklearn/decomposition/tests/test_factor_analysis.py
+++ b/sklearn/decomposition/tests/test_factor_analysis.py
@@ -2,10 +2,9 @@
 #         Alexandre Gramfort
 # Licence: BSD3

-import warnings
 import numpy as np

-from sklearn.utils.testing import assert_true
+from sklearn.utils.testing import assert_warns
 from sklearn.utils.testing import assert_equal
 from sklearn.utils.testing import assert_greater
 from sklearn.utils.testing import assert_less
@@ -69,16 +68,11 @@ def test_factor_analysis():
     fa1, fa2 = fas
     for attr in ['loglike_', 'components_', 'noise_variance_']:
         assert_almost_equal(f(fa1, attr), f(fa2, attr))
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always', ConvergenceWarning)
-        fa1.max_iter = 1
-        fa1.verbose = True
-        fa1.fit(X)
-        assert_true(w[-1].category == ConvergenceWarning)
-
-        warnings.simplefilter('always', DeprecationWarning)
-        FactorAnalysis(verbose=1)
-        assert_true(w[-1].category == DeprecationWarning)
+
+    fa1.max_iter = 1
+    fa1.verbose = True
+    assert_warns(ConvergenceWarning, fa1.fit, X)
+    assert_warns(DeprecationWarning, FactorAnalysis, verbose=1)

     # Test get_covariance and get_precision with n_components == n_features
     # with n_components < n_features and with n_components == 0
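For reference, a minimal sketch of the pattern these conversions converge on. This is illustrative only; `noisy_predict` and its data are made up, but the `assert_warns(warning_class, func, *args, **kw)` signature and the fact that it returns `func`'s result are both taken from the helper added at the end of this patch:

    import warnings
    from sklearn.utils.testing import assert_warns

    def noisy_predict(X):
        # stands in for an estimator method that warns before returning
        warnings.warn("dtype was coerced", RuntimeWarning)
        return [0] * len(X)

    # assert_warns returns the callable's result, so the converted tests
    # can keep asserting on the output in a single expression
    labels = assert_warns(RuntimeWarning, noisy_predict, [[1], [2]])
    assert labels == [0, 0]
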
diff --git a/sklearn/decomposition/tests/test_fastica.py b/sklearn/decomposition/tests/test_fastica.py
index 6f875c2cf9293..94d832b4f75dd 100644
--- a/sklearn/decomposition/tests/test_fastica.py
+++ b/sklearn/decomposition/tests/test_fastica.py
@@ -1,7 +1,6 @@
 """
 Test the fastica algorithm.
 """
-import warnings
 import itertools

 import numpy as np
@@ -14,6 +13,7 @@
 from sklearn.utils.testing import assert_true
 from sklearn.utils.testing import assert_less
 from sklearn.utils.testing import assert_equal
+from sklearn.utils.testing import assert_warns

 from sklearn.decomposition import FastICA, fastica, PCA
 from sklearn.decomposition.fastica_ import _gs_decorrelation
@@ -137,16 +137,11 @@ def g_test(x):

 def test_fastica_nowhiten():
     m = [[0, 1], [1, 0]]
-    ica = FastICA(whiten=False, random_state=0)
-    ica.fit(m)
-    ica.mixing_

     # test for issue #697
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter("always")
-        ica = FastICA(n_components=1, whiten=False, random_state=0)
-        ica.fit(m)  # should raise warning
-        assert_true(len(w) == 1)  # 1 warning should be raised
+    ica = FastICA(n_components=1, whiten=False, random_state=0)
+    assert_warns(UserWarning, ica.fit, m)
+    assert_true(hasattr(ica, 'mixing_'))


 def test_non_square_fastica(add_noise=False):
diff --git a/sklearn/decomposition/tests/test_pca.py b/sklearn/decomposition/tests/test_pca.py
index 0b17458c24568..7615043a3355f 100644
--- a/sklearn/decomposition/tests/test_pca.py
+++ b/sklearn/decomposition/tests/test_pca.py
@@ -1,5 +1,3 @@
-import warnings
-
 import numpy as np
 from scipy.sparse import csr_matrix

@@ -7,7 +5,8 @@
 from sklearn.utils.testing import assert_array_almost_equal
 from sklearn.utils.testing import assert_true
 from sklearn.utils.testing import assert_equal
-from sklearn.utils.testing import assert_less, assert_greater
+from sklearn.utils.testing import assert_greater
+from sklearn.utils.testing import assert_warns

 from sklearn import datasets
 from sklearn.decomposition import PCA
@@ -45,7 +44,8 @@ def test_pca():
         pca.fit(X)
         cov = pca.get_covariance()
         precision = pca.get_precision()
-        assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
+        assert_array_almost_equal(np.dot(cov, precision),
+                                  np.eye(X.shape[1]), 12)


 def test_whitening():
@@ -190,11 +190,8 @@ def test_sparse_randomized_pca_check_projection():
     Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
     Xt = csr_matrix(Xt)

-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always', DeprecationWarning)
-        Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
-        assert_equal(len(w), 1)
-        assert_equal(w[0].category, DeprecationWarning)
+    pca = RandomizedPCA(n_components=2, random_state=0)
+    Yt = assert_warns(DeprecationWarning, pca.fit, X).transform(Xt)

     Yt /= np.sqrt((Yt ** 2).sum())

@@ -213,25 +210,16 @@ def test_sparse_randomized_pca_inverse():
     # same check that we can find the original data from the transformed signal
     # (since the data is almost of rank n_components)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always', DeprecationWarning)
-        pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
-        assert_equal(len(w), 1)
-        assert_equal(w[0].category, DeprecationWarning)
-
+    pca = RandomizedPCA(n_components=2, random_state=0)
+    assert_warns(DeprecationWarning, pca.fit, X)
     Y = pca.transform(X)
     Y_inverse = pca.inverse_transform(Y)
     assert_almost_equal(X.todense(), Y_inverse, decimal=2)

     # same as above with whitening (approximate reconstruction)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always', DeprecationWarning)
-        pca = RandomizedPCA(n_components=2, whiten=True,
-                            random_state=0).fit(X)
-        assert_equal(len(w), 1)
-        assert_equal(w[0].category, DeprecationWarning)
-
+    pca = assert_warns(DeprecationWarning, RandomizedPCA(n_components=2,
+                       whiten=True, random_state=0).fit, X)
     Y = pca.transform(X)
     Y_inverse = pca.inverse_transform(Y)
     relative_max_delta = (np.abs(X.todense() - Y_inverse)
@@ -374,12 +362,11 @@ def test_probabilistic_pca_1():
     rng = np.random.RandomState(0)
     X = rng.randn(n, p) * .1 + np.array([3, 4, 5])

-    with warnings.catch_warnings(record=True) as w:
-        ppca = ProbabilisticPCA(n_components=2)
-        ppca.fit(X)
-        ll1 = ppca.score(X)
-        h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
-        np.testing.assert_almost_equal(ll1.mean() / h, 1, 0)
+    ppca = assert_warns(DeprecationWarning, ProbabilisticPCA, n_components=2)
+    ppca.fit(X)
+    ll1 = ppca.score(X)
+    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
+    np.testing.assert_almost_equal(ll1.mean() / h, 1, 0)


 def test_probabilistic_pca_2():
@@ -387,12 +374,11 @@ def test_probabilistic_pca_2():
     n, p = 100, 3
     rng = np.random.RandomState(0)
     X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
-    with warnings.catch_warnings(record=True) as w:
-        ppca = ProbabilisticPCA(n_components=2)
-        ppca.fit(X)
-        ll1 = ppca.score(X)
-        ll2 = ppca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
-        assert_greater(ll1.mean(), ll2.mean())
+    ppca = assert_warns(DeprecationWarning, ProbabilisticPCA, n_components=2)
+    ppca.fit(X)
+    ll1 = ppca.score(X)
+    ll2 = ppca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
+    assert_greater(ll1.mean(), ll2.mean())


 def test_probabilistic_pca_3():
@@ -402,18 +388,16 @@ def test_probabilistic_pca_3():
     n, p = 100, 3
     rng = np.random.RandomState(0)
     X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
-    with warnings.catch_warnings(record=True) as w:
-        ppca = ProbabilisticPCA(n_components=2)
-        ppca.fit(X)
-        ll1 = ppca.score(X)
-        ppca.fit(X, homoscedastic=False)
-        ll2 = ppca.score(X)
-        # XXX : Don't test as homoscedastic=False is buggy
-        # Comment to be removed with ProbabilisticPCA is removed
+    ppca = assert_warns(DeprecationWarning, ProbabilisticPCA, n_components=2)
+    ppca.fit(X).score(X)
+    ppca.fit(X, homoscedastic=False).score(X)
+    # XXX : Don't test as homoscedastic=False is buggy
+    # Comment to be removed when ProbabilisticPCA is removed


 def test_probabilistic_pca_4():
     """Check that ppca select the right model"""
+
     n, p = 200, 3
     rng = np.random.RandomState(0)
     Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
@@ -421,13 +405,13 @@ def test_probabilistic_pca_4():
     Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
           + np.array([1, 0, 7]))
     ll = np.zeros(p)
-    with warnings.catch_warnings(record=True) as w:
-        for k in range(p):
-            ppca = ProbabilisticPCA(n_components=k)
-            ppca.fit(Xl)
-            ll[k] = ppca.score(Xt).mean()
+    for k in range(p):
+        ppca = assert_warns(DeprecationWarning, ProbabilisticPCA,
+                            n_components=k)
+        ppca.fit(Xl)
+        ll[k] = ppca.score(Xt).mean()

-        assert_true(ll.argmax() == 1)
+    assert_true(ll.argmax() == 1)


 def test_probabilistic_pca_vs_pca():
@@ -437,9 +421,9 @@ def test_probabilistic_pca_vs_pca():
     rng = np.random.RandomState(0)
     X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
     pca = PCA(n_components=2).fit(X)
-    with warnings.catch_warnings(record=True) as w:
-        ppca = ProbabilisticPCA(n_components=2).fit(X)
-        assert_array_almost_equal(pca.score_samples(X), ppca.score(X))
+    ppca = assert_warns(DeprecationWarning, ProbabilisticPCA,
+                        n_components=2).fit(X)
+    assert_array_almost_equal(pca.score_samples(X), ppca.score(X))


 if __name__ == '__main__':
diff --git a/sklearn/ensemble/tests/test_gradient_boosting.py b/sklearn/ensemble/tests/test_gradient_boosting.py
index df850d73fdc2f..53dcad3aa1497 100644
--- a/sklearn/ensemble/tests/test_gradient_boosting.py
+++ b/sklearn/ensemble/tests/test_gradient_boosting.py
@@ -3,13 +3,13 @@
 """

 import numpy as np
-import warnings

 from sklearn.utils.testing import assert_equal
 from sklearn.utils.testing import assert_array_equal
 from sklearn.utils.testing import assert_array_almost_equal
 from sklearn.utils.testing import assert_raises
 from sklearn.utils.testing import assert_true
+from sklearn.utils.testing import assert_warns

 from sklearn.metrics import mean_squared_error
@@ -446,12 +446,10 @@ def test_shape_y():
     y_ = np.asarray(y, dtype=np.int32)
     y_ = y_[:, np.newaxis]

-    with warnings.catch_warnings(record=True):
-        # This will raise a DataConversionWarning that we want to
-        # "always" raise, elsewhere the warnings gets ignored in the
-        # later tests, and the tests that check for this warning fail
-        warnings.simplefilter("always", DataConversionWarning)
-        clf.fit(X, y_)
+    # This will raise a DataConversionWarning that we want to
+    # "always" raise; otherwise the warnings get ignored in the
+    # later tests, and the tests that check for this warning fail
+    assert_warns(DataConversionWarning, clf.fit, X, y_)
     assert_array_equal(clf.predict(T), true_result)
     assert_equal(100, len(clf.estimators_))
@@ -490,11 +488,7 @@ def test_oob_score():
     clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                      subsample=0.5)
     clf.fit(X, y)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter("always")
-        assert_true(hasattr(clf, 'oob_score_'))
-        assert_equal(len(w), 1)
-
+    assert_warns(DeprecationWarning, hasattr, clf, 'oob_score_')
+

 def test_oob_improvement():
     """Test if oob improvement has correct shape and regression test. """
@@ -586,16 +580,12 @@ def test_more_verbose_output():

 def test_warn_deviance():
     """Test if mdeviance and bdeviance give deprecated warning. """
     for loss in ('bdeviance', 'mdeviance'):
-        with warnings.catch_warnings(record=True) as w:
-            # This will raise a DataConversionWarning that we want to
-            # "always" raise, elsewhere the warnings gets ignored in the
-            # later tests, and the tests that check for this warning fail
-            warnings.simplefilter("always", DataConversionWarning)
-            clf = GradientBoostingClassifier(loss=loss)
-            try:
-                clf.fit(X, y)
-            except:
-                # mdeviance will raise ValueError because only 2 classes
-                pass
-            # deprecated warning for bdeviance and mdeviance
-            assert len(w) == 1
+        # 'bdeviance' and 'mdeviance' are deprecated loss names; fitting
+        # with them should raise a warning (mdeviance additionally raises
+        # a ValueError here because the toy data has only 2 classes)
+        clf = GradientBoostingClassifier(loss=loss)
+        try:
+            assert_warns(UserWarning, clf.fit, X, y)
+        except ValueError:
+            # mdeviance will raise ValueError because only 2 classes
+            pass
diff --git a/sklearn/feature_extraction/tests/test_text.py b/sklearn/feature_extraction/tests/test_text.py
index 53c2085076a03..39e1a7406765f 100644
--- a/sklearn/feature_extraction/tests/test_text.py
+++ b/sklearn/feature_extraction/tests/test_text.py
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 import warnings
+
 from sklearn.feature_extraction.text import strip_tags
 from sklearn.feature_extraction.text import strip_accents_unicode
 from sklearn.feature_extraction.text import strip_accents_ascii
@@ -15,6 +16,7 @@
 from sklearn.pipeline import Pipeline
 from sklearn.svm import LinearSVC

+
 import numpy as np
 from nose import SkipTest
 from nose.tools import assert_equal
@@ -25,7 +27,8 @@
 from numpy.testing import assert_array_almost_equal
 from numpy.testing import assert_array_equal
 from numpy.testing import assert_raises
-from sklearn.utils.testing import assert_in, assert_less, assert_greater
+from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
+                                   assert_warns_message)

 from collections import defaultdict, Mapping
 from functools import partial
@@ -177,16 +180,12 @@ def test_unicode_decode_error():
     assert_raises(UnicodeDecodeError, ca, text_bytes)

     # Check the old interface
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter("always")
-
-        ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
-                             charset='ascii').build_analyzer()
-        assert_raises(UnicodeDecodeError, ca, text_bytes)
-
-        assert_equal(len(w), 1)
-        assert_true(issubclass(w[0].category, DeprecationWarning))
-        assert_true("charset" in str(w[0].message).lower())
+    in_warning_message = 'charset'
+    ca = assert_warns_message(DeprecationWarning, in_warning_message,
+                              CountVectorizer, analyzer='char',
+                              ngram_range=(3, 6),
+                              charset='ascii').build_analyzer()
+    assert_raises(UnicodeDecodeError, ca, text_bytes)


 def test_char_ngram_analyzer():
@@ -349,22 +348,15 @@ def test_tfidf_no_smoothing():
          [1, 0, 0]]
     tr = TfidfTransformer(smooth_idf=False, norm='l2')

-    # First we need to verify that numpy here provides div 0 warnings
     with warnings.catch_warnings(record=True) as w:
         1. / np.array([0.])
         numpy_provides_div0_warning = len(w) == 1

-    with warnings.catch_warnings(record=True) as w:
-        tfidf = tr.fit_transform(X).toarray()
-        if not numpy_provides_div0_warning:
-            raise SkipTest("Numpy does not provide div 0 warnings.")
-        assert_equal(len(w), 1)
-        # For Python 3 compatibility
-        if hasattr(w[0].message, 'args'):
-            assert_true("divide by zero" in w[0].message.args[0])
-        else:
-            assert_true("divide by zero" in w[0].message)
-
+    in_warning_message = 'divide by zero'
+    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
+                                 tr.fit_transform, X).toarray()
+    if not numpy_provides_div0_warning:
+        raise SkipTest("Numpy does not provide div 0 warnings.")


 def test_sublinear_tf():
     X = [[1], [2], [3]]
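`assert_warns_message`, defined at the bottom of this patch, checks the warning's text as well as its class, and a substring is enough. Roughly (the warning function and its message here are illustrative):

    import warnings
    from sklearn.utils.testing import assert_warns_message

    def old_api():
        warnings.warn("the charset parameter is deprecated",
                      DeprecationWarning)

    # 'charset' is a substring of the emitted message, so this passes
    assert_warns_message(DeprecationWarning, 'charset', old_api)
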
diff --git a/sklearn/feature_selection/tests/test_feature_select.py b/sklearn/feature_selection/tests/test_feature_select.py
index 44ef05a422ac1..5785dfc1431ff 100644
--- a/sklearn/feature_selection/tests/test_feature_select.py
+++ b/sklearn/feature_selection/tests/test_feature_select.py
@@ -5,11 +5,10 @@
 import itertools
 import numpy as np
 from scipy import stats, sparse
-import warnings

 from nose.tools import assert_equal, assert_raises, assert_true
 from numpy.testing import assert_array_equal, assert_array_almost_equal
-from sklearn.utils.testing import assert_not_in
+from sklearn.utils.testing import assert_not_in, ignore_warnings
 from sklearn.utils import safe_mask

 from sklearn.datasets.samples_generator import (make_classification,
@@ -432,16 +431,15 @@ def test_selectkbest_tiebreaking():
     y = [1]
     dummy_score = lambda X, y: (X[0], X[0])
     for X in Xs:
-        with warnings.catch_warnings(record=True):
-            sel = SelectKBest(dummy_score, k=1)
-            X1 = sel.fit_transform([X], y)
-            assert_equal(X1.shape[1], 1)
-            assert_best_scores_kept(sel)
+        sel = SelectKBest(dummy_score, k=1)
+        X1 = ignore_warnings(sel.fit_transform)([X], y)
+        assert_equal(X1.shape[1], 1)
+        assert_best_scores_kept(sel)

-            sel = SelectKBest(dummy_score, k=2)
-            X2 = sel.fit_transform([X], y)
-            assert_equal(X2.shape[1], 2)
-            assert_best_scores_kept(sel)
+        sel = SelectKBest(dummy_score, k=2)
+        X2 = ignore_warnings(sel.fit_transform)([X], y)
+        assert_equal(X2.shape[1], 2)
+        assert_best_scores_kept(sel)


 def test_selectpercentile_tiebreaking():
@@ -451,16 +449,15 @@ def test_selectpercentile_tiebreaking():
     y = [1]
     dummy_score = lambda X, y: (X[0], X[0])
     for X in Xs:
-        with warnings.catch_warnings(record=True):
-            sel = SelectPercentile(dummy_score, percentile=34)
-            X1 = sel.fit_transform([X], y)
-            assert_equal(X1.shape[1], 1)
-            assert_best_scores_kept(sel)
+        sel = SelectPercentile(dummy_score, percentile=34)
+        X1 = ignore_warnings(sel.fit_transform)([X], y)
+        assert_equal(X1.shape[1], 1)
+        assert_best_scores_kept(sel)

-            sel = SelectPercentile(dummy_score, percentile=67)
-            X2 = sel.fit_transform([X], y)
-            assert_equal(X2.shape[1], 2)
-            assert_best_scores_kept(sel)
+        sel = SelectPercentile(dummy_score, percentile=67)
+        X2 = ignore_warnings(sel.fit_transform)([X], y)
+        assert_equal(X2.shape[1], 2)
+        assert_best_scores_kept(sel)


 def test_tied_pvalues():
@@ -503,7 +500,7 @@ def test_nans():

     for select in (SelectKBest(f_classif, 2),
                    SelectPercentile(f_classif, percentile=67)):
-        select.fit(X, y)
+        ignore_warnings(select.fit)(X, y)
         assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
diff --git a/sklearn/feature_selection/tests/test_rfe.py b/sklearn/feature_selection/tests/test_rfe.py
index 220e667406ee4..6c8023645bcc2 100644
--- a/sklearn/feature_selection/tests/test_rfe.py
+++ b/sklearn/feature_selection/tests/test_rfe.py
@@ -2,8 +2,6 @@
 Testing Recursive feature elimination
 """

-import warnings
-
 import numpy as np
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 from nose.tools import assert_equal
@@ -14,6 +12,8 @@
 from sklearn.metrics import zero_one_loss
 from sklearn.svm import SVC
 from sklearn.utils import check_random_state
+from sklearn.utils.testing import ignore_warnings
+
 from sklearn.metrics.scorer import SCORERS


 def test_rfe_set_params():
@@ -89,8 +89,7 @@ def test_rfecv():
     # Test using a customized loss function
     rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                   loss_func=zero_one_loss)
-    with warnings.catch_warnings(record=True):
-        rfecv.fit(X, y)
+    ignore_warnings(rfecv.fit)(X, y)
     X_r = rfecv.transform(X)
     assert_array_equal(X_r, iris.data)
diff --git a/sklearn/linear_model/omp.py b/sklearn/linear_model/omp.py
index 2e3af45a93844..d369946d2aaea 100644
--- a/sklearn/linear_model/omp.py
+++ b/sklearn/linear_model/omp.py
@@ -199,14 +199,14 @@ def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
         lam = np.argmax(np.abs(alpha))
         if lam < n_active or alpha[lam] ** 2 < min_float:
             # selected same atom twice, or inner product too small
-            warnings.warn(premature, RuntimeWarning, stacklevel=2)
+            warnings.warn(premature, RuntimeWarning, stacklevel=3)
             break
         if n_active > 0:
             L[n_active, :n_active] = Gram[lam, :n_active]
             solve_triangular(L[:n_active, :n_active], L[n_active, :n_active])
             v = nrm2(L[n_active, :n_active]) ** 2
             if 1 - v <= min_float:  # selected atoms are dependent
-                warnings.warn(premature, RuntimeWarning, stacklevel=2)
+                warnings.warn(premature, RuntimeWarning, stacklevel=3)
                 break
         L[n_active, n_active] = np.sqrt(1 - v)
         Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
@@ -225,7 +225,7 @@ def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
             tol_curr += delta
             delta = np.inner(gamma, beta[:n_active])
             tol_curr -= delta
-            if tol_curr <= tol:
+            if abs(tol_curr) <= tol:
                 break
         elif n_active == max_features:
             break
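On the `stacklevel` bump in omp.py: `_gram_omp` is reached through one more frame of public API, so `stacklevel=3` attributes the warning to the caller of that public function rather than to scikit-learn internals, which is also what `assert_warns` observes when it invokes the public entry point. A minimal sketch of the mechanics (function names are illustrative):

    import warnings

    def _private_helper():
        # stacklevel=1 -> this line, 2 -> public_entry_point's body,
        # 3 -> whoever called public_entry_point
        warnings.warn("premature stop", RuntimeWarning, stacklevel=3)

    def public_entry_point():
        _private_helper()

    public_entry_point()  # the warning is reported at this call site
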
diff --git a/sklearn/linear_model/tests/test_coordinate_descent.py b/sklearn/linear_model/tests/test_coordinate_descent.py
index 84ef701b4ee46..3b50286980e9c 100644
--- a/sklearn/linear_model/tests/test_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_coordinate_descent.py
@@ -2,7 +2,6 @@
 #          Alexandre Gramfort
 # License: BSD 3 clause

-import warnings
 from sys import version_info

 import numpy as np
@@ -15,6 +14,8 @@
 from sklearn.utils.testing import assert_true
 from sklearn.utils.testing import assert_greater
 from sklearn.utils.testing import assert_raises
+from sklearn.utils.testing import assert_warns
+from sklearn.utils.testing import ignore_warnings

 from sklearn.linear_model.coordinate_descent import Lasso, \
     LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
@@ -187,11 +188,10 @@ def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
     y = np.array([1, 2, 3.1])
     alphas = [5., 1., .5]
     # Compute the lasso_path
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", DeprecationWarning)
-        coef_path = [e.coef_ for e in lasso_path(X, y, alphas=alphas,
-                                                 return_models=True,
-                                                 fit_intercept=False)]
+    f = ignore_warnings
+    coef_path = [e.coef_ for e in f(lasso_path)(X, y, alphas=alphas,
+                                                return_models=True,
+                                                fit_intercept=False)]

     # Use lars_path and lasso_path(new output) with 1D linear interpolation
     # to compute the the same path
@@ -218,23 +218,22 @@ def test_enet_path():
                                      n_informative_features=100)
     max_iter = 150

-    with warnings.catch_warnings():
-        # Here we have a small number of iterations, and thus the
-        # ElasticNet might not converge. This is to speed up tests
-        warnings.simplefilter("ignore", UserWarning)
-        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
-                           max_iter=max_iter)
-        clf.fit(X, y)
-        # Well-conditioned settings, we should have selected our
-        # smallest penalty
-        assert_almost_equal(clf.alpha_, min(clf.alphas_))
-        # Non-sparse ground truth: we should have seleted an elastic-net
-        # that is closer to ridge than to lasso
-        assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
-
-        clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
-                           max_iter=max_iter, precompute=True)
-        clf.fit(X, y)
+    # Here we have a small number of iterations, and thus the
+    # ElasticNet might not converge. This is to speed up tests
+    clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
+                       max_iter=max_iter)
+    ignore_warnings(clf.fit)(X, y)
+    # Well-conditioned settings, we should have selected our
+    # smallest penalty
+    assert_almost_equal(clf.alpha_, min(clf.alphas_))
+    # Non-sparse ground truth: we should have selected an elastic-net
+    # that is closer to ridge than to lasso
+    assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
+
+    clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3,
+                       max_iter=max_iter, precompute=True)
+    ignore_warnings(clf.fit)(X, y)
+
     # Well-conditioned settings, we should have selected our
     # smallest penalty
@@ -262,28 +261,21 @@ def test_path_parameters():

 def test_warm_start():
     X, y, _, _ = build_dataset()
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", UserWarning)
-        clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
-        clf.fit(X, y)
-        clf.fit(X, y)  # do a second round with 5 iterations
+    clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
+    ignore_warnings(clf.fit)(X, y)
+    ignore_warnings(clf.fit)(X, y)  # do a second round with 5 iterations

-        clf2 = ElasticNet(alpha=0.1, max_iter=10)
-        clf2.fit(X, y)
-        assert_array_almost_equal(clf2.coef_, clf.coef_)
+    clf2 = ElasticNet(alpha=0.1, max_iter=10)
+    ignore_warnings(clf2.fit)(X, y)
+    assert_array_almost_equal(clf2.coef_, clf.coef_)


 def test_lasso_alpha_warning():
-    check_warnings()  # Skip if unsupported Python version
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        X = [[-1], [0], [1]]
-        Y = [-1, 0, 1]  # just a straight line
-
-        clf = Lasso(alpha=0)
-        clf.fit(X, Y)
+    X = [[-1], [0], [1]]
+    Y = [-1, 0, 1]  # just a straight line

-        assert_greater(len(w), 0)  # warnings should be raised
+    clf = Lasso(alpha=0)
+    assert_warns(UserWarning, clf.fit, X, Y)


 def test_lasso_positive_constraint():
diff --git a/sklearn/linear_model/tests/test_least_angle.py b/sklearn/linear_model/tests/test_least_angle.py
index 1c2788c0a190f..813ca0d0bd6ef 100644
--- a/sklearn/linear_model/tests/test_least_angle.py
+++ b/sklearn/linear_model/tests/test_least_angle.py
@@ -1,4 +1,3 @@
-import warnings
 from nose.tools import assert_equal

 import numpy as np
@@ -9,7 +8,7 @@
 from sklearn.utils.testing import assert_less
 from sklearn.utils.testing import assert_greater
 from sklearn.utils.testing import assert_raises
-
+from sklearn.utils.testing import ignore_warnings, assert_warns_message
 from sklearn import linear_model, datasets

 diabetes = datasets.load_diabetes()
@@ -112,7 +111,8 @@ def test_collinearity():
                   [1., 1., 0]])
     y = np.array([1., 0., 0])

-    _, _, coef_path_ = linear_model.lars_path(X, y, alpha_min=0.01)
+    f = ignore_warnings
+    _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
     assert_true(not np.isnan(coef_path_).any())
     residual = np.dot(X, coef_path_[:, -1]) - y
     assert_less((residual ** 2).sum(), 1.)  # just make sure it's bounded
@@ -182,12 +182,10 @@ def test_singular_matrix():
     # to give a good answer
     X1 = np.array([[1, 1.], [1., 1.]])
     y1 = np.array([1, 1])
-    with warnings.catch_warnings(record=True) as warning_list:
-        warnings.simplefilter("always", UserWarning)
-        alphas, active, coef_path = linear_model.lars_path(X1, y1)
-    assert_true(len(warning_list) > 0)
-    assert_true('Dropping a regressor' in warning_list[0].message.args[0])
-
+    in_warn_message = 'Dropping a regressor'
+    f = assert_warns_message
+    alphas, active, coef_path = f(UserWarning, in_warn_message,
+                                  linear_model.lars_path, X1, y1)
     assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
@@ -320,27 +318,26 @@ def test_lasso_lars_vs_lasso_cd_ill_conditioned():
     y += sigma * rng.rand(*y.shape)
     y = y.squeeze()

-    with warnings.catch_warnings(record=True) as warning_list:
-        warnings.simplefilter("always", UserWarning)
-        lars_alphas, _, lars_coef = linear_model.lars_path(X, y,
-                                                           method='lasso')
-
-    assert_true(len(warning_list) > 0)
-    assert_true(('Dropping a regressor' in warning_list[0].message.args[0])
-                or ('Early stopping' in warning_list[0].message.args[0]))
-
-    _, lasso_coef2, _ = linear_model.lasso_path(X, y,
-                                                alphas=lars_alphas, tol=1e-6,
-                                                fit_intercept=False)
-
-    lasso_coef = np.zeros((w.shape[0], len(lars_alphas)))
-    with warnings.catch_warnings():
-        warnings.simplefilter("ignore", DeprecationWarning)
-        for i, model in enumerate(linear_model.lasso_path(X, y,
-                                                          alphas=lars_alphas,
-                                                          tol=1e-6,
-                                                          return_models=True,
-                                                          fit_intercept=False)):
+    f = assert_warns_message
+
+    def in_warn_message(msg):
+        return 'Early stopping' in msg or 'Dropping a regressor' in msg
+
+    lars_alphas, _, lars_coef = f(UserWarning,
+                                  in_warn_message,
+                                  linear_model.lars_path, X, y,
+                                  method='lasso')
+
+    with ignore_warnings():
+        _, lasso_coef2, _ = linear_model.lasso_path(X, y,
+                                                    alphas=lars_alphas,
+                                                    tol=1e-6,
+                                                    fit_intercept=False)
+
+        lasso_coef = np.zeros((w.shape[0], len(lars_alphas)))
+        iter_models = enumerate(linear_model.lasso_path(X, y,
+                                                        alphas=lars_alphas,
+                                                        tol=1e-6,
+                                                        return_models=True,
+                                                        fit_intercept=False))
+        for i, model in iter_models:
             lasso_coef[:, i] = model.coef_

     np.testing.assert_array_almost_equal(lars_coef, lasso_coef, decimal=1)
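`assert_warns_message` also accepts a callable in place of a substring (see its implementation at the end of this patch), which is what the ill-conditioned test above relies on when either of two messages is acceptable. A sketch of that pattern, with an illustrative warning:

    import warnings
    from sklearn.utils.testing import assert_warns_message

    def flaky():
        warnings.warn("Early stopping the lars path", UserWarning)

    def acceptable(msg):
        # the predicate receives the message text and returns True/False
        return 'Early stopping' in msg or 'Dropping a regressor' in msg

    assert_warns_message(UserWarning, acceptable, flaky)
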
diff --git a/sklearn/linear_model/tests/test_omp.py b/sklearn/linear_model/tests/test_omp.py
index 173b484b4873e..dd87fece83bfe 100644
--- a/sklearn/linear_model/tests/test_omp.py
+++ b/sklearn/linear_model/tests/test_omp.py
@@ -1,8 +1,6 @@
 # Author: Vlad Niculae
 # Licence: BSD 3 clause

-import warnings
-
 import numpy as np

 from sklearn.utils.testing import assert_raises
@@ -10,7 +8,9 @@
 from sklearn.utils.testing import assert_equal
 from sklearn.utils.testing import assert_array_equal
 from sklearn.utils.testing import assert_array_almost_equal
-from sklearn.utils.testing import assert_greater
+from sklearn.utils.testing import assert_warns
+from sklearn.utils.testing import ignore_warnings
+
 from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
                                   OrthogonalMatchingPursuit,
@@ -70,17 +70,15 @@ def test_with_without_gram_tol():

 def test_unreachable_accuracy():
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        assert_array_almost_equal(
-            orthogonal_mp(X, y, tol=0),
-            orthogonal_mp(X, y, n_nonzero_coefs=n_features))
+    assert_array_almost_equal(
+        orthogonal_mp(X, y, tol=0),
+        orthogonal_mp(X, y, n_nonzero_coefs=n_features))

-        assert_array_almost_equal(
-            orthogonal_mp(X, y, tol=0, precompute=True),
-            orthogonal_mp(X, y, precompute=True,
-                          n_nonzero_coefs=n_features))
-        assert_greater(len(w), 0)  # warnings should be raised
+    assert_array_almost_equal(
+        assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
+                     precompute=True),
+        orthogonal_mp(X, y, precompute=True,
+                      n_nonzero_coefs=n_features))


 def test_bad_input():
@@ -118,44 +116,32 @@ def test_estimator():

     omp.set_params(fit_intercept=False, normalize=False)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        omp.fit(X, y[:, 0], Gram=G, Xy=Xy[:, 0])
-        assert_equal(omp.coef_.shape, (n_features,))
-        assert_equal(omp.intercept_, 0)
-        assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)
-        assert_true(len(w) == 2)
+    assert_warns(DeprecationWarning, omp.fit, X, y[:, 0], Gram=G, Xy=Xy[:, 0])
+    assert_equal(omp.coef_.shape, (n_features,))
+    assert_equal(omp.intercept_, 0)
+    assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)

-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        omp.fit(X, y, Gram=G, Xy=Xy)
-        assert_equal(omp.coef_.shape, (n_targets, n_features))
-        assert_equal(omp.intercept_, 0)
-        assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
-        assert_true(len(w) == 2)
+    assert_warns(DeprecationWarning, omp.fit, X, y, Gram=G, Xy=Xy)
+    assert_equal(omp.coef_.shape, (n_targets, n_features))
+    assert_equal(omp.intercept_, 0)
+    assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)


 def test_scaling_with_gram():
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        # Use only 1 nonzero coef to be faster and to avoid warnings
-        omp1 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
-                                         fit_intercept=False, normalize=False)
-        omp2 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
-                                         fit_intercept=True, normalize=False)
-        omp3 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
-                                         fit_intercept=False, normalize=True)
-        omp1.fit(X, y, Gram=G)
-        omp1.fit(X, y, Gram=G, Xy=Xy)
-        assert_true(len(w) == 3)
-        omp2.fit(X, y, Gram=G)
-        assert_true(len(w) == 5)
-        omp2.fit(X, y, Gram=G, Xy=Xy)
-        assert_true(len(w) == 8)
-        omp3.fit(X, y, Gram=G)
-        assert_true(len(w) == 10)
-        omp3.fit(X, y, Gram=G, Xy=Xy)
-        assert_true(len(w) == 13)
+    # Use only 1 nonzero coef to be faster and to avoid warnings
+    omp1 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
+                                     fit_intercept=False, normalize=False)
+    omp2 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
+                                     fit_intercept=True, normalize=False)
+    omp3 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
+                                     fit_intercept=False, normalize=True)
+
+    f, w = assert_warns, DeprecationWarning
+    f(w, omp1.fit, X, y, Gram=G)
+    f(w, omp1.fit, X, y, Gram=G, Xy=Xy)
+    f(w, omp2.fit, X, y, Gram=G)
+    f(w, omp2.fit, X, y, Gram=G, Xy=Xy)
+    f(w, omp3.fit, X, y, Gram=G)
+    f(w, omp3.fit, X, y, Gram=G, Xy=Xy)


 def test_identical_regressors():
@@ -164,10 +150,7 @@ def test_identical_regressors():
     gamma = np.zeros(n_features)
     gamma[0] = gamma[1] = 1.
     newy = np.dot(newX, gamma)
-    with warnings.catch_warnings(record=True) as w:
-        warnings.simplefilter('always')
-        orthogonal_mp(newX, newy, 2)
-        assert_true(len(w) == 1)
+    assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)


 def test_swapped_regressors():
@@ -188,10 +171,8 @@ def test_swapped_regressors():
 def test_no_atoms():
     y_empty = np.zeros_like(y)
     Xy_empty = np.dot(X.T, y_empty)
-    with warnings.catch_warnings():
-        warnings.simplefilter('ignore')
-        gamma_empty = orthogonal_mp(X, y_empty, 1)
-        gamma_empty_gram = orthogonal_mp_gram(G, Xy_empty, 1)
+    gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
+    gamma_empty_gram = ignore_warnings(orthogonal_mp_gram)(G, Xy_empty, 1)
     assert_equal(np.all(gamma_empty == 0), True)
     assert_equal(np.all(gamma_empty_gram == 0), True)
diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py
index 61c30e9fec673..db42e7f47d6e0 100644
--- a/sklearn/linear_model/tests/test_ridge.py
+++ b/sklearn/linear_model/tests/test_ridge.py
@@ -1,8 +1,6 @@
 import numpy as np
 import scipy.sparse as sp

-import warnings
-
 from sklearn.utils.testing import assert_true
 from sklearn.utils.testing import assert_almost_equal
 from sklearn.utils.testing import assert_array_almost_equal
@@ -10,6 +8,7 @@
 from sklearn.utils.testing import assert_array_equal
 from sklearn.utils.testing import assert_greater
 from sklearn.utils.testing import assert_raises
+from sklearn.utils.testing import ignore_warnings

 from sklearn import datasets
 from sklearn.metrics import mean_squared_error
@@ -280,16 +279,15 @@ def _test_ridge_loo(filter_):
         ret.append(alpha_)

     # check that we get same best alpha with custom loss_func
+    f = ignore_warnings
     ridge_gcv2 = RidgeCV(fit_intercept=False, loss_func=mean_squared_error)
-    with warnings.catch_warnings(record=True):
-        ridge_gcv2.fit(filter_(X_diabetes), y_diabetes)
+    f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
     assert_equal(ridge_gcv2.alpha_, alpha_)

     # check that we get same best alpha with custom score_func
     func = lambda x, y: -mean_squared_error(x, y)
     ridge_gcv3 = RidgeCV(fit_intercept=False, score_func=func)
-    with warnings.catch_warnings(record=True):
-        ridge_gcv3.fit(filter_(X_diabetes), y_diabetes)
+    f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
     assert_equal(ridge_gcv3.alpha_, alpha_)

     # check that we get same best alpha with a scorer
diff --git a/sklearn/linear_model/tests/test_sparse_coordinate_descent.py b/sklearn/linear_model/tests/test_sparse_coordinate_descent.py
index aade32fca4616..0b1623409b90a 100644
--- a/sklearn/linear_model/tests/test_sparse_coordinate_descent.py
+++ b/sklearn/linear_model/tests/test_sparse_coordinate_descent.py
@@ -1,14 +1,16 @@
-import warnings
-
 import numpy as np
 import scipy.sparse as sp

 from sklearn.utils.testing import assert_array_almost_equal
 from sklearn.utils.testing import assert_almost_equal
 from sklearn.utils.testing import assert_equal
-from sklearn.utils.testing import assert_true
 from sklearn.utils.testing import assert_less
+from sklearn.utils.testing import assert_true
+
 from sklearn.utils.testing import assert_greater
+from sklearn.utils.testing import ignore_warnings
+
+
 from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
                                                      ElasticNetCV)
@@ -58,10 +60,9 @@ def test_enet_toy_list_input():

     # this should be the same as unregularized least squares
     clf = ElasticNet(alpha=0, l1_ratio=1.0)
-    with warnings.catch_warnings(record=True):
-        # catch warning about alpha=0.
-        # this is discouraged but should work.
-        clf.fit(X, Y)
+    # catch warning about alpha=0.
+    # this is discouraged but should work.
+    ignore_warnings(clf.fit)(X, Y)
     pred = clf.predict(T)
     assert_array_almost_equal(clf.coef_, [1])
     assert_array_almost_equal(pred, [2, 3, 4])
@@ -85,7 +86,7 @@ def test_enet_toy_list_input():

 def test_enet_toy_explicit_sparse_input():
     """Test ElasticNet for various values of alpha and l1_ratio with sparse X"""
-
+    f = ignore_warnings
     # training samples
     X = sp.lil_matrix((3, 1))
     X[0, 0] = -1
@@ -101,7 +102,7 @@ def test_enet_toy_explicit_sparse_input():

     # this should be the same as lasso
     clf = ElasticNet(alpha=0, l1_ratio=1.0)
-    clf.fit(X, Y)
+    f(clf.fit)(X, Y)
     pred = clf.predict(T)
     assert_array_almost_equal(clf.coef_, [1])
     assert_array_almost_equal(pred, [2, 3, 4])
@@ -240,10 +241,10 @@ def test_path_parameters():
     n_alphas = 10
     clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
                        l1_ratio=0.5, fit_intercept=False)
-    clf.fit(X, y)  # new params
+    ignore_warnings(clf.fit)(X, y)  # new params
     assert_almost_equal(0.5, clf.l1_ratio)
     assert_equal(n_alphas, clf.n_alphas)
     assert_equal(n_alphas, len(clf.alphas_))
     sparse_mse_path = clf.mse_path_
-    clf.fit(X.toarray(), y)  # compare with dense data
+    ignore_warnings(clf.fit)(X.toarray(), y)  # compare with dense data
     assert_almost_equal(clf.mse_path_, sparse_mse_path)
diff --git a/sklearn/manifold/tests/test_locally_linear.py b/sklearn/manifold/tests/test_locally_linear.py
index 9931e43f260a8..07bbfee38c57c 100644
--- a/sklearn/manifold/tests/test_locally_linear.py
+++ b/sklearn/manifold/tests/test_locally_linear.py
@@ -6,6 +6,7 @@
 from sklearn import neighbors, manifold
 from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
 from sklearn.utils.testing import assert_less
+from sklearn.utils.testing import ignore_warnings

 eigen_solvers = ['dense', 'arpack']

@@ -113,12 +114,11 @@ def test_pipeline():

 # Test the error raised when the weight matrix is singular
 def test_singular_matrix():
-    import warnings
     from nose.tools import assert_raises
     M = np.ones((10, 3))
-    with warnings.catch_warnings(record=True):
-        assert_raises(ValueError, manifold.locally_linear_embedding,
-                      M, 2, 1, method='standard', eigen_solver='arpack')
+    f = ignore_warnings
+    assert_raises(ValueError, f(manifold.locally_linear_embedding),
+                  M, 2, 1, method='standard', eigen_solver='arpack')


 if __name__ == '__main__':
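The `ignore_warnings(fn)(args...)` idiom used throughout these files wraps a callable so that warnings raised during the call are suppressed, while return values and exceptions pass through unchanged; that is why `assert_raises` can still wrap it, as in `test_singular_matrix` above. A rough sketch (the `chatty` function is made up):

    import warnings
    from sklearn.utils.testing import ignore_warnings

    def chatty(x):
        warnings.warn("alpha=0 is discouraged", UserWarning)
        if x < 0:
            raise ValueError("negative input")
        return 2 * x

    assert ignore_warnings(chatty)(3) == 6   # result passes through
    # exceptions still propagate:
    # assert_raises(ValueError, ignore_warnings(chatty), -1)
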
diff --git a/sklearn/manifold/tests/test_spectral_embedding.py b/sklearn/manifold/tests/test_spectral_embedding.py
index 6b394453056e4..288a1d069ad4b 100644
--- a/sklearn/manifold/tests/test_spectral_embedding.py
+++ b/sklearn/manifold/tests/test_spectral_embedding.py
@@ -1,5 +1,3 @@
-import warnings
-
 from nose.tools import assert_true
 from nose.tools import assert_equal
diff --git a/sklearn/metrics/tests/test_metrics.py b/sklearn/metrics/tests/test_metrics.py
index b3ed308fcb035..ba7914b0a9f8d 100644
--- a/sklearn/metrics/tests/test_metrics.py
+++ b/sklearn/metrics/tests/test_metrics.py
@@ -1,8 +1,6 @@
 from __future__ import division, print_function

-import warnings
 import numpy as np
-
 from functools import partial
 from itertools import product

@@ -24,7 +22,8 @@
                                    assert_array_almost_equal,
                                    assert_warns,
                                    assert_greater,
-                                   ignore_warnings)
+                                   ignore_warnings,
+                                   assert_warns_message)

 from sklearn.metrics import (accuracy_score,
@@ -53,6 +52,7 @@
                              zero_one,
                              zero_one_score,
                              zero_one_loss)
+
 from sklearn.metrics.metrics import _check_clf_targets
 from sklearn.metrics.metrics import _check_reg_targets
 from sklearn.metrics.metrics import UndefinedMetricWarning
@@ -353,8 +353,8 @@ def test_roc_curve():
     assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
     assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
-    with warnings.catch_warnings(record=True):
-        assert_almost_equal(roc_auc, auc_score(y_true, probas_pred))
+    assert_almost_equal(roc_auc,
+                        ignore_warnings(auc_score)(y_true, probas_pred))
     assert_equal(fpr.shape, tpr.shape)
     assert_equal(fpr.shape, thresholds.shape)
@@ -442,9 +442,8 @@ def test_roc_curve_one_label():
     y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
     y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
     # assert there are warnings
-    with warnings.catch_warnings(record=True) as w:
-        fpr, tpr, thresholds = roc_curve(y_true, y_pred)
-        assert_equal(len(w), 1)
+    w = UndefinedMetricWarning
+    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)

     # all true labels, all fpr should be nan
     assert_array_equal(fpr, np.nan * np.ones(len(thresholds)))
@@ -452,10 +451,9 @@ def test_roc_curve_one_label():
     assert_equal(fpr.shape, thresholds.shape)

     # assert there are warnings
-    with warnings.catch_warnings(record=True) as w:
-        fpr, tpr, thresholds = roc_curve([1 - x for x in y_true],
-                                         y_pred)
-        assert_equal(len(w), 1)
+    fpr, tpr, thresholds = assert_warns(w, roc_curve,
+                                        [1 - x for x in y_true],
+                                        y_pred)

     # all negative labels, all tpr should be nan
     assert_array_equal(tpr, np.nan * np.ones(len(thresholds)))
@@ -530,27 +528,28 @@ def test_auc_score_non_binary_class():
         assert_raise_message(ValueError, "AUC is defined for binary "
                              "classification only", roc_auc_score, y_true, y_pred)
-    with warnings.catch_warnings(record=True):
-        rng = check_random_state(404)
-        y_pred = rng.rand(10)
-        # y_true contains only one class value
-        y_true = np.zeros(10, dtype="int")
-        assert_raise_message(ValueError, "AUC is defined for binary "
-                             "classification only", auc_score,
-                             y_true, y_pred)
-        y_true = np.ones(10, dtype="int")
-        assert_raise_message(ValueError, "AUC is defined for binary "
-                             "classification only", auc_score, y_true,
-                             y_pred)
-        y_true = -np.ones(10, dtype="int")
-        assert_raise_message(ValueError, "AUC is defined for binary "
-                             "classification only", auc_score, y_true,
-                             y_pred)
-        # y_true contains three different class values
-        y_true = rng.randint(0, 3, size=10)
-        assert_raise_message(ValueError, "AUC is defined for binary "
-                             "classification only", auc_score, y_true,
-                             y_pred)
+    rng = check_random_state(404)
+    y_pred = rng.rand(10)
+    # y_true contains only one class value
+    y_true = np.zeros(10, dtype="int")
+
+    f = ignore_warnings(auc_score)
+    assert_raise_message(ValueError, "AUC is defined for binary "
+                         "classification only", f,
+                         y_true, y_pred)
+    y_true = np.ones(10, dtype="int")
+    assert_raise_message(ValueError, "AUC is defined for binary "
+                         "classification only", f, y_true,
+                         y_pred)
+    y_true = -np.ones(10, dtype="int")
+    assert_raise_message(ValueError, "AUC is defined for binary "
+                         "classification only", f, y_true,
+                         y_pred)
+    # y_true contains three different class values
+    y_true = rng.randint(0, 3, size=10)
+    assert_raise_message(ValueError, "AUC is defined for binary "
+                         "classification only", f, y_true,
+                         y_pred)


 def test_precision_recall_f1_score_binary():
@@ -656,13 +655,10 @@ def test(y_true, y_pred):

     test([str(y) for y in y_true],
          [str(y) for y in y_pred])

-
+@ignore_warnings
 def test_matthews_corrcoef_nan():
-    with warnings.catch_warnings():
-        warnings.simplefilter("always")
-        assert_equal(matthews_corrcoef([0], [1]), 0.0)
-        warnings.simplefilter("error")
-        assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
+    assert_equal(matthews_corrcoef([0], [1]), 0.0)
+    assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)


 def test_precision_recall_f1_score_multiclass():
@@ -980,12 +976,12 @@ def test_score_scale_invariance():
     assert_equal(roc_auc, roc_auc_scaled)
     assert_equal(roc_auc, roc_auc_shifted)

-    with warnings.catch_warnings(record=True):
-        roc_auc = auc_score(y_true, probas_pred)
-        roc_auc_scaled = auc_score(y_true, 100 * probas_pred)
-        roc_auc_shifted = auc_score(y_true, probas_pred - 10)
-        assert_equal(roc_auc, roc_auc_scaled)
-        assert_equal(roc_auc, roc_auc_shifted)
+    f = ignore_warnings(auc_score)
+    roc_auc = f(y_true, probas_pred)
+    roc_auc_scaled = f(y_true, 100 * probas_pred)
+    roc_auc_shifted = f(y_true, probas_pred - 10)
+    assert_equal(roc_auc, roc_auc_scaled)
+    assert_equal(roc_auc, roc_auc_shifted)

     pr_auc = average_precision_score(y_true, probas_pred)
     pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
@@ -1002,9 +998,9 @@ def test_losses():

     # Classification
     # --------------
-    with warnings.catch_warnings(record=True):  # Throw deprecated warning
-        assert_equal(zero_one(y_true, y_pred), 11)
+
+    f = ignore_warnings
+    assert_equal(f(zero_one)(y_true, y_pred), 11)

     assert_almost_equal(zero_one_loss(y_true, y_pred),
                         11 / float(n_samples), 2)
@@ -1017,10 +1013,8 @@ def test_losses():
     assert_equal(accuracy_score(y_true, y_pred),
                  1 - zero_one_loss(y_true, y_pred))

-    with warnings.catch_warnings(record=True):
-        # Throw deprecated warning
-        assert_equal(zero_one_score(y_true, y_pred),
-                     1 - zero_one_loss(y_true, y_pred))
+    assert_equal(f(zero_one_score)(y_true, y_pred),
+                 1 - zero_one_loss(y_true, y_pred))

     # Regression
     # ----------
@@ -1083,16 +1077,16 @@ def test_symmetry():
                                 msg="%s seems to be symmetric" % name)

     # Deprecated metrics
-    with warnings.catch_warnings(record=True):
-        # Throw deprecated warning
-        assert_almost_equal(zero_one(y_true, y_pred),
-                            zero_one(y_pred, y_true))
-        assert_almost_equal(zero_one(y_true, y_pred, normalize=False),
-                            zero_one(y_pred, y_true, normalize=False))
+    f = ignore_warnings
+    assert_almost_equal(f(zero_one)(y_true, y_pred),
+                        f(zero_one)(y_pred, y_true))

-        assert_almost_equal(zero_one_score(y_true, y_pred),
-                            zero_one_score(y_pred, y_true))
+    assert_almost_equal(f(zero_one)(y_true, y_pred, normalize=False),
+                        f(zero_one)(y_pred, y_true, normalize=False))
+
+    assert_almost_equal(f(zero_one_score)(y_true, y_pred),
+                        f(zero_one_score)(y_pred, y_true))


 def test_sample_order_invariance():
@@ -1243,20 +1237,15 @@ def test_hinge_loss_binary():
     pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
     assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)

-    with warnings.catch_warnings(record=True):
-        # Test deprecated pos_label
-        assert_equal(
-            hinge_loss(-y_true, pred_decision),
-            hinge_loss(y_true, pred_decision, pos_label=-1, neg_label=1))
+    f = ignore_warnings(hinge_loss)
+    assert_equal(f(-y_true, pred_decision),
+                 f(y_true, pred_decision, pos_label=-1, neg_label=1))

     y_true = np.array([0, 2, 2, 0])
     pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
     assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
-    with warnings.catch_warnings(record=True):
-        # Test deprecated pos_label
-        assert_equal(hinge_loss(y_true, pred_decision, pos_label=2,
-                                neg_label=0), 1.2 / 4)
+
+    assert_equal(f(y_true, pred_decision, pos_label=2, neg_label=0), 1.2 / 4)


 def test_multioutput_regression():
@@ -1336,49 +1325,52 @@ def test_multilabel_representation_invariance():
     y2_shuffle_binary_indicator = lb.transform(y2_shuffle)

     for name, metric in MULTILABELS_METRICS.items():
-        with warnings.catch_warnings(record=True):
-            warnings.simplefilter("always")
-
-            measure = metric(y1, y2)
-
-            # Check representation invariance
-            assert_almost_equal(metric(y1_binary_indicator,
-                                       y2_binary_indicator),
-                                measure,
-                                err_msg="%s failed representation invariance "
-                                        "between list of list of labels "
-                                        "format and dense binary indicator "
-                                        "format." % name)
-
-            # Check invariance with redundant labels with list of labels
-            assert_almost_equal(metric(y1, y2_redundant), measure,
-                                err_msg="%s failed rendundant label invariance"
-                                        % name)
-
-            assert_almost_equal(metric(y1_redundant, y2_redundant), measure,
-                                err_msg="%s failed rendundant label invariance"
-                                        % name)
-
-            assert_almost_equal(metric(y1_redundant, y2), measure,
-                                err_msg="%s failed rendundant label invariance"
-                                        % name)
-
-            # Check shuffling invariance with list of labels
-            assert_almost_equal(metric(y1_shuffle, y2_shuffle), measure,
-                                err_msg="%s failed shuffling invariance "
-                                        "with list of list of labels format."
-                                        % name)
-
-            # Check shuffling invariance with dense binary indicator matrix
-            assert_almost_equal(metric(y1_shuffle_binary_indicator,
-                                       y2_shuffle_binary_indicator), measure,
-                                err_msg="%s failed shuffling invariance "
-                                        " with dense binary indicator format."
-                                        % name)
-
-            # Check raises error with mix input representation
-            assert_raises(ValueError, metric, y1, y2_binary_indicator)
-            assert_raises(ValueError, metric, y1_binary_indicator, y2)
+        # XXX cruel hack to work with partial functions
+        if isinstance(metric, partial):
+            metric.__module__ = 'tmp'
+            metric.__name__ = 'foo'
+
+        metric = ignore_warnings(metric)
+        measure = metric(y1, y2)
+
+        # Check representation invariance
+        assert_almost_equal(metric(y1_binary_indicator,
+                                   y2_binary_indicator),
+                            measure,
+                            err_msg="%s failed representation invariance "
+                                    "between list of list of labels "
+                                    "format and dense binary indicator "
+                                    "format." % name)
+
+        # Check invariance with redundant labels with list of labels
+        assert_almost_equal(metric(y1, y2_redundant), measure,
+                            err_msg="%s failed redundant label invariance"
+                                    % name)
+
+        assert_almost_equal(metric(y1_redundant, y2_redundant), measure,
+                            err_msg="%s failed redundant label invariance"
+                                    % name)
+
+        assert_almost_equal(metric(y1_redundant, y2), measure,
+                            err_msg="%s failed redundant label invariance"
+                                    % name)
+
+        # Check shuffling invariance with list of labels
+        assert_almost_equal(metric(y1_shuffle, y2_shuffle), measure,
+                            err_msg="%s failed shuffling invariance "
+                                    "with list of list of labels format."
+                                    % name)
+
+        # Check shuffling invariance with dense binary indicator matrix
+        assert_almost_equal(metric(y1_shuffle_binary_indicator,
+                                   y2_shuffle_binary_indicator), measure,
+                            err_msg="%s failed shuffling invariance "
+                                    "with dense binary indicator format."
+                                    % name)
+
+        # Check raises error with mix input representation
+        assert_raises(ValueError, metric, y1, y2_binary_indicator)
+        assert_raises(ValueError, metric, y1_binary_indicator, y2)


 def test_multilabel_zero_one_loss_subset():
@@ -1796,94 +1788,80 @@ def test_precision_recall_f1_no_labels():
     # |y_hat_i inter y_i | = [0, 0, 0]
     # |y_i| = [0, 0, 0]
     # |y_hat_i| = [0, 0, 0]
-    with warnings.catch_warnings(record=True):
-        warnings.simplefilter("always")
-
-        for beta in [1]:
-            p, r, f, s = assert_warns(UndefinedMetricWarning,
-                                      precision_recall_fscore_support,
-                                      y_true, y_pred, average=None, beta=beta)
-            assert_array_almost_equal(p, [0, 0, 0], 2)
-            assert_array_almost_equal(r, [0, 0, 0], 2)
-            assert_array_almost_equal(f, [0, 0, 0], 2)
-            assert_array_almost_equal(s, [0, 0, 0], 2)
-
-            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
-                                 y_true, y_pred, beta=beta, average=None)
-            assert_array_almost_equal(fbeta, [0, 0, 0], 2)
-
-            for average in ["macro", "micro", "weighted", "samples"]:
-                p, r, f, s = assert_warns(UndefinedMetricWarning,
-                                          precision_recall_fscore_support,
-                                          y_true, y_pred, average=average,
-                                          beta=beta)
-                assert_almost_equal(p, 0)
-                assert_almost_equal(r, 0)
-                assert_almost_equal(f, 0)
-                assert_equal(s, None)
-                fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
-                                     y_true, y_pred,
-                                     beta=beta, average=average)
-                assert_almost_equal(fbeta, 0)
+    for beta in [1]:
+        p, r, f, s = assert_warns(UndefinedMetricWarning,
+                                  precision_recall_fscore_support,
+                                  y_true, y_pred, average=None, beta=beta)
+        assert_array_almost_equal(p, [0, 0, 0], 2)
+        assert_array_almost_equal(r, [0, 0, 0], 2)
+        assert_array_almost_equal(f, [0, 0, 0], 2)
+        assert_array_almost_equal(s, [0, 0, 0], 2)
+
+        fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
+                             y_true, y_pred, beta=beta, average=None)
+        assert_array_almost_equal(fbeta, [0, 0, 0], 2)
+
+        for average in ["macro", "micro", "weighted", "samples"]:
+            p, r, f, s = assert_warns(UndefinedMetricWarning,
+                                      precision_recall_fscore_support,
+                                      y_true, y_pred, average=average,
+                                      beta=beta)
+            assert_almost_equal(p, 0)
+            assert_almost_equal(r, 0)
+            assert_almost_equal(f, 0)
+            assert_equal(s, None)
+
+            fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
+                                 y_true, y_pred,
+                                 beta=beta, average=average)
+            assert_almost_equal(fbeta, 0)


-def test_prf_warnings():
-    with warnings.catch_warnings(record=True) as record:
-        warnings.simplefilter('always')
-
-        # average of per-label scores
-        for average in [None, 'weighted', 'macro']:
-            precision_recall_fscore_support([0, 1, 2], [1, 1, 2],
-                                            average=average)
-            assert_equal(str(record.pop().message),
-                         'Precision and F-score are ill-defined and being '
-                         'set to 0.0 in labels with no predicted samples.')
-            precision_recall_fscore_support([1, 1, 2], [0, 1, 2],
-                                            average=average)
-            assert_equal(str(record.pop().message),
-                         'Recall and F-score are ill-defined and '
-                         'being set to 0.0 in labels with no true samples.')
-
-        # average of per-sample scores
-        precision_recall_fscore_support(np.array([[1, 0], [1, 0]]),
-                                        np.array([[1, 0], [0, 0]]),
-                                        average='samples')
-        assert_equal(str(record.pop().message),
-                     'Precision and F-score are ill-defined and '
-                     'being set to 0.0 in samples with no predicted labels.')
-        precision_recall_fscore_support(np.array([[1, 0], [0, 0]]),
-                                        np.array([[1, 0], [1, 0]]),
-                                        average='samples')
-        assert_equal(str(record.pop().message),
-                     'Recall and F-score are ill-defined and '
-                     'being set to 0.0 in samples with no true labels.')
-
-        # single score: micro-average
-        precision_recall_fscore_support(np.array([[1, 1], [1, 1]]),
-                                        np.array([[0, 0], [0, 0]]),
-                                        average='micro')
-        assert_equal(str(record.pop().message),
-                     'Precision and F-score are ill-defined and '
-                     'being set to 0.0 due to no predicted samples.')
-        precision_recall_fscore_support(np.array([[0, 0], [0, 0]]),
-                                        np.array([[1, 1], [1, 1]]),
-                                        average='micro')
-        assert_equal(str(record.pop().message),
-                     'Recall and F-score are ill-defined and '
-                     'being set to 0.0 due to no true samples.')
-
-        # single postive label
-        precision_recall_fscore_support([1, 1], [-1, -1],
-                                        average='macro')
-        assert_equal(str(record.pop().message),
-                     'Precision and F-score are ill-defined and '
-                     'being set to 0.0 due to no predicted samples.')
-        precision_recall_fscore_support([-1, -1], [1, 1],
-                                        average='macro')
-        assert_equal(str(record.pop().message),
-                     'Recall and F-score are ill-defined and '
-                     'being set to 0.0 due to no true samples.')
+def test_prf_warnings():
+    # average of per-label scores
+    f, w = precision_recall_fscore_support, UndefinedMetricWarning
+    my_assert = assert_warns_message
+    for average in [None, 'weighted', 'macro']:
+        msg = ('Precision and F-score are ill-defined and '
+               'being set to 0.0 in labels with no predicted samples.')
+        my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
+
+        msg = ('Recall and F-score are ill-defined and '
+               'being set to 0.0 in labels with no true samples.')
+        my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
+
+    # average of per-sample scores
+    msg = ('Precision and F-score are ill-defined and '
+           'being set to 0.0 in samples with no predicted labels.')
+    my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
+              np.array([[1, 0], [0, 0]]), average='samples')
+
+    msg = ('Recall and F-score are ill-defined and '
+           'being set to 0.0 in samples with no true labels.')
+    my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
+              np.array([[1, 0], [1, 0]]), average='samples')
+
+    # single score: micro-average
+    msg = ('Precision and F-score are ill-defined and '
+           'being set to 0.0 due to no predicted samples.')
+    my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
+              np.array([[0, 0], [0, 0]]), average='micro')
+
+    msg = ('Recall and F-score are ill-defined and '
+           'being set to 0.0 due to no true samples.')
+    my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
+              np.array([[1, 1], [1, 1]]), average='micro')
+
+    # single positive label
+    msg = ('Precision and F-score are ill-defined and '
+           'being set to 0.0 due to no predicted samples.')
+    my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
+
+    msg = ('Recall and F-score are ill-defined and '
+           'being set to 0.0 due to no true samples.')
+    my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')


 def test__check_clf_targets():
diff --git a/sklearn/neighbors/tests/test_neighbors.py b/sklearn/neighbors/tests/test_neighbors.py
index 30da0e22b3ede..b8527f1c1ebfc 100644
--- a/sklearn/neighbors/tests/test_neighbors.py
+++ b/sklearn/neighbors/tests/test_neighbors.py
@@ -1,4 +1,3 @@
-import warnings
 from itertools import product

 import numpy as np
@@ -11,6 +10,8 @@
 from sklearn.utils.testing import assert_raises
 from sklearn.utils.testing import assert_equal
 from sklearn.utils.testing import assert_true
+from sklearn.utils.testing import assert_warns
+from sklearn.utils.testing import ignore_warnings
 from sklearn.utils.validation import check_random_state
 from sklearn import neighbors, datasets

@@ -756,7 +757,7 @@ def test_neighbors_badargs():
                       nbrs.predict,
                       X)
         assert_raises(ValueError,
-                      nbrs.fit,
+                      ignore_warnings(nbrs.fit),
                       Xsparse, y)
         nbrs = cls()
         assert_raises(ValueError,
@@ -785,10 +786,7 @@ def test_neighbors_deprecation_arg():
     warning to be raised, as well as not crash the estimator."""
     for cls in (neighbors.KNeighborsClassifier,
                 neighbors.KNeighborsRegressor):
-        with warnings.catch_warnings(record=True) as w:
-            warnings.simplefilter("always")
-            cls(warn_on_equidistant=True)
-            assert_equal(len(w), 1)
+        assert_warns(DeprecationWarning, cls, warn_on_equidistant=True)


 def test_neighbors_metrics(n_samples=20, n_features=3,
diff --git a/sklearn/svm/tests/test_sparse.py b/sklearn/svm/tests/test_sparse.py
index dcdb5065297ef..59168201fd991 100644
--- a/sklearn/svm/tests/test_sparse.py
+++ b/sklearn/svm/tests/test_sparse.py
@@ -1,4 +1,3 @@
-import warnings
 from nose.tools import assert_raises, assert_true, assert_false

 import numpy as np
@@ -281,10 +280,8 @@ def test_sparse_svc_clone_with_callable_kernel():
 def test_timeout():
     sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                  random_state=0, max_iter=1)
-    with warnings.catch_warnings(record=True):
-        warnings.simplefilter("always")
-        assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
+
+    assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)


 def test_consistent_proba():
diff --git a/sklearn/svm/tests/test_svm.py b/sklearn/svm/tests/test_svm.py
index 8b0ad94d0af93..8b1da554b593c 100644
--- a/sklearn/svm/tests/test_svm.py
+++ b/sklearn/svm/tests/test_svm.py
@@ -4,7 +4,6 @@
 TODO: remove hard coded numerical results when possible
 """

-import warnings
 import numpy as np
 from numpy.testing import (assert_array_equal, assert_array_almost_equal,
                            assert_almost_equal)
@@ -18,6 +17,8 @@
 from sklearn.utils import ConvergenceWarning
 from sklearn.utils.fixes import unique
 from sklearn.utils.testing import assert_greater, assert_less
+from sklearn.utils.testing import assert_warns
+

 # toy sample
 X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
@@ -640,7 +641,7 @@ def test_svc_clone_with_callable_kernel():

     assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
                               svm_builtin.predict_proba(iris.data),
-                                  decimal=4)
+                              decimal=4)
     assert_array_almost_equal(svm_cloned.decision_function(iris.data),
                               svm_builtin.decision_function(iris.data))
@@ -653,14 +654,7 @@ def test_svc_bad_kernel():
 def test_timeout():
     a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                 random_state=0, max_iter=1)
-    with warnings.catch_warnings(record=True) as foo:
-        # Hackish way to reset the warning counter
-        from sklearn.svm import base
-        base.__warningregistry__ = {}
-        warnings.simplefilter("always")
-        a.fit(X, Y)
-        assert_equal(len(foo), 1, msg=foo)
-        assert_equal(foo[0].category, ConvergenceWarning, msg=foo[0].category)
+    assert_warns(ConvergenceWarning, a.fit, X, Y)


 def test_consistent_proba():
+ + func : callable + Calable object to trigger warnings. + + *args : the positional arguments to `func`. + + **kw : the keyword arguments to `func` + + Returns + ------- + + result : the return value of `func` + + """ + + # very important to avoid uncontrolled state propagation + clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") - # Trigger a warning. result = func(*args, **kw) + # Verify some things + if not len(w) > 0: + raise AssertionError("No warning raised when calling %s" + % func.__name__) + + if not w[0].category is warning_class: + raise AssertionError("First warning for %s is not a " + "%s( is %s)" + % (func.__name__, warning_class, w[0])) + + return result + + +def assert_warns_message(warning_class, message, func, *args, **kw): + # very important to avoid uncontrolled state propagation + """Test that a certain warning occurs and with a certain message. + + Parameters + ---------- + warning_class : the warning class + The class to test for, e.g. UserWarning. + + message : str | callable + The entire message or a substring to test for. If callable, + it takes a string as argument and will trigger an assertion error + if it returns `False`. + + func : callable + Calable object to trigger warnings. + + *args : the positional arguments to `func`. + + **kw : the keyword arguments to `func`. + Returns + ------- + + result : the return value of `func` + + """ + clean_warning_registry() + with warnings.catch_warnings(record=True) as w: + # Cause all warnings to always be triggered. + warnings.simplefilter("always") + # Trigger a warning. + result = func(*args, **kw) # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" @@ -96,13 +166,28 @@ def assert_warns(warning_class, func, *args, **kw): "%s( is %s)" % (func.__name__, warning_class, w[0])) + # substring will match, the entire message with typo won't + msg = w[0].message # For Python 3 compatibility + msg = str(msg.args[0] if hasattr(msg, 'args') else msg) + if callable(message): # add support for certain tests + check_in_message = message + else: + check_in_message = lambda msg : message in msg + if not check_in_message(msg): + raise AssertionError("The message received ('%s') for <%s> is " + "not the one you expected ('%s')" + % (msg, func.__name__, message + )) return result # To remove when we support numpy 1.7 def assert_no_warnings(func, *args, **kw): # XXX: once we may depend on python >= 2.6, this can be replaced by the + # warnings module context manager. + # very important to avoid uncontrolled state propagation + clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') @@ -113,17 +198,102 @@ def assert_no_warnings(func, *args, **kw): return result -def ignore_warnings(fn): +def ignore_warnings(obj=None): + """ Context manager and decorator to ignore warnings + + Note. Using this (in both variants) will clear all warnings + from all python modules loaded. In case you need to test + cross-module-warning-logging this is not your tool of choice. + + Examples + -------- + >>> with ignore_warnings(): + ... warnings.warn('buhuhuhu') + + >>> def nasty_warn(): + ... warnings.warn('buhuhuhu') + ... 
print 42 + + >>> ignore_warnings(nasty_warn)() + 42 + + """ + if callable(obj): + return _ignore_warnings(obj) + else: + return _IgnoreWarnings() + + +def _ignore_warnings(fn): """Decorator to catch and hide warnings without visual nesting""" @wraps(fn) def wrapper(*args, **kwargs): + # very important to avoid uncontrolled state propagation + clean_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') return fn(*args, **kwargs) w[:] = [] + return wrapper +class _IgnoreWarnings(object): + + """Improved and simplified Python warnings context manager + + Copied from Python 2.7.5 and modified as required. + """ + + def __init__(self): + """ + Parameters + ========== + category : warning class + The category to filter. Defaults to Warning. If None, + all categories will be muted. + """ + self._record = True + self._module = sys.modules['warnings'] + self._entered = False + self.log = [] + + def __repr__(self): + args = [] + if self._record: + args.append("record=True") + if self._module is not sys.modules['warnings']: + args.append("module=%r" % self._module) + name = type(self).__name__ + return "%s(%s)" % (name, ", ".join(args)) + + def __enter__(self): + clean_warning_registry() # be safe and not propagate state + chaos + warnings.simplefilter('always') + if self._entered: + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + if self._record: + self.log = [] + def showwarning(*args, **kwargs): + self.log.append(warnings.WarningMessage(*args, **kwargs)) + self._module.showwarning = showwarning + return self.log + else: + return None + + def __exit__(self, *exc_info): + if not self._entered: + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + self.log[:] = [] + clean_warning_registry() # be safe and not propagate state + chaos + + try: from nose.tools import assert_less except ImportError: @@ -358,3 +528,12 @@ def run_test(*args, **kwargs): else: return func(*args, **kwargs) return run_test + + +def clean_warning_registry(): + """Safe way to reset warniings """ + warnings.resetwarnings() + reg = "__warningregistry__" + for mod in sys.modules.values(): + if hasattr(mod, reg): + getattr(mod, reg).clear() diff --git a/sklearn/utils/tests/test_extmath.py b/sklearn/utils/tests/test_extmath.py index 78d39e9569c60..c15a64150ba9c 100644 --- a/sklearn/utils/tests/test_extmath.py +++ b/sklearn/utils/tests/test_extmath.py @@ -3,7 +3,6 @@ # Denis Engemann # # License: BSD 3 clause -import warnings import numpy as np from scipy import sparse from scipy import linalg @@ -16,6 +15,7 @@ from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises, assert_raise_message +from sklearn.utils.testing import assert_warns from sklearn.utils.extmath import density from sklearn.utils.extmath import logsumexp @@ -376,3 +376,7 @@ def test_fast_dot(): if has_blas: for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]: assert_raises(ValueError, _fast_dot, x, x.T) + +if __name__ == '__main__': + import nose + nose.runmodule()
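
Reviewer's note, not part of the patch: a minimal, self-contained sketch of how the helpers this patch adds to sklearn.utils.testing are meant to be used, assuming a checkout that includes the changes above. `noisy_add` is a made-up stand-in for code under test that emits a warning; the assertions show the expected behavior of each helper.

    # Illustrative sketch only; noisy_add is hypothetical, not in scikit-learn.
    import warnings

    from sklearn.utils.testing import (assert_warns, assert_warns_message,
                                       ignore_warnings)


    def noisy_add(a, b):
        # Stand-in for an estimator method that warns before returning.
        warnings.warn("noisy_add is deprecated", DeprecationWarning)
        return a + b

    # assert_warns checks the first warning's class and hands back the
    # wrapped function's return value.
    assert assert_warns(DeprecationWarning, noisy_add, 1, 2) == 3

    # assert_warns_message additionally matches a substring of the message
    # (or accepts a callable predicate on the message).
    assert_warns_message(DeprecationWarning, "is deprecated", noisy_add, 1, 2)

    # Called with a callable, ignore_warnings acts as a decorator ...
    assert ignore_warnings(noisy_add)(1, 2) == 3

    # ... and called with no argument it acts as a context manager.
    with ignore_warnings():
        noisy_add(1, 2)

Because every helper starts by calling clean_warning_registry(), repeated invocations of the same warning are not suppressed by Python's per-module __warningregistry__, which is exactly the state-leak problem the old catch_warnings(record=True) blocks in the tests kept running into.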