RF: autoreplace assert_true(...==...) with plain assert #12547

Merged: 5 commits, Nov 11, 2018
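
This PR mechanically replaces nose-style `assert_true(...)` calls in the test suite with bare `assert` statements, relying on pytest's assertion rewriting to produce informative failure messages. The replacement script itself is not part of the diff; as a rough illustration only, a minimal single-file rewriter could look like the sketch below. The `rewrite_line` helper and its regex are assumptions of this sketch, not the tool actually used, and they only handle `assert_true(...)` calls that fit on one line.

    import re

    # Matches a line of the form `<indent>assert_true(<body>)`, where <body>
    # may contain balanced parentheses but spans a single line. The greedy
    # `.*` captures everything up to the final closing parenthesis.
    ASSERT_TRUE = re.compile(r"^(\s*)assert_true\((.*)\)\s*$")

    def rewrite_line(line):
        """Rewrite one nose-style assertion to a bare assert statement.

        `assert_true(expr)` becomes `assert expr`, and
        `assert_true(expr, msg)` becomes `assert expr, msg`.
        """
        match = ASSERT_TRUE.match(line)
        if match is None:
            return line  # leave context lines untouched
        indent, body = match.groups()
        return "{}assert {}".format(indent, body)

    # Example:
    #   rewrite_line("    assert_true(np.all(X != -1))")
    #   -> "    assert np.all(X != -1)"

Because the expression body is kept verbatim, `assert_true(a == b)` naturally becomes `assert a == b`, the transformation the PR title describes; calls wrapped across several lines would still need manual cleanup.
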
4 changes: 2 additions & 2 deletions sklearn/cluster/tests/test_affinity_propagation.py
@@ -160,5 +160,5 @@ def test_equal_similarities_and_preferences():
     assert_false(_equal_similarities_and_preferences(S, np.array([0, 1])))

     # Same preferences
-    assert_true(_equal_similarities_and_preferences(S, np.array([0, 0])))
-    assert_true(_equal_similarities_and_preferences(S, np.array(0)))
+    assert _equal_similarities_and_preferences(S, np.array([0, 0]))
+    assert _equal_similarities_and_preferences(S, np.array(0))
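
Why this is worth a mechanical pass: `assert_true` is a nose/`unittest`-era helper, and on failure it can only report "False is not true", whereas pytest rewrites a bare `assert` to display the evaluated operands. A small illustration with hypothetical values (the real tests use clustering fixtures, not this stand-in):

    import numpy as np

    def test_label_count():
        labels = np.zeros(7)  # hypothetical stand-in for real cluster labels
        # Under pytest a failure reports something like "assert 7 == 10",
        # while assert_true(np.size(labels) == 10) only said "False is not true".
        assert np.size(labels) == 10
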
2 changes: 1 addition & 1 deletion sklearn/cluster/tests/test_bicluster.py
@@ -51,7 +51,7 @@ def test_get_submatrix():
         submatrix[:] = -1
         if issparse(X):
             X = X.toarray()
-        assert_true(np.all(X != -1))
+        assert np.all(X != -1)


 def _test_shape_indices(model):
20 changes: 10 additions & 10 deletions sklearn/cluster/tests/test_feature_agglomeration.py
@@ -18,24 +18,24 @@ def test_feature_agglomeration():
                                         pooling_func=np.median)
     assert_no_warnings(agglo_mean.fit, X)
     assert_no_warnings(agglo_median.fit, X)
-    assert_true(np.size(np.unique(agglo_mean.labels_)) == n_clusters)
-    assert_true(np.size(np.unique(agglo_median.labels_)) == n_clusters)
-    assert_true(np.size(agglo_mean.labels_) == X.shape[1])
-    assert_true(np.size(agglo_median.labels_) == X.shape[1])
+    assert np.size(np.unique(agglo_mean.labels_)) == n_clusters
+    assert np.size(np.unique(agglo_median.labels_)) == n_clusters
+    assert np.size(agglo_mean.labels_) == X.shape[1]
+    assert np.size(agglo_median.labels_) == X.shape[1]

     # Test transform
     Xt_mean = agglo_mean.transform(X)
     Xt_median = agglo_median.transform(X)
-    assert_true(Xt_mean.shape[1] == n_clusters)
-    assert_true(Xt_median.shape[1] == n_clusters)
-    assert_true(Xt_mean == np.array([1 / 3.]))
-    assert_true(Xt_median == np.array([0.]))
+    assert Xt_mean.shape[1] == n_clusters
+    assert Xt_median.shape[1] == n_clusters
+    assert Xt_mean == np.array([1 / 3.])
+    assert Xt_median == np.array([0.])

     # Test inverse transform
     X_full_mean = agglo_mean.inverse_transform(Xt_mean)
     X_full_median = agglo_median.inverse_transform(Xt_median)
-    assert_true(np.unique(X_full_mean[0]).size == n_clusters)
-    assert_true(np.unique(X_full_median[0]).size == n_clusters)
+    assert np.unique(X_full_mean[0]).size == n_clusters
+    assert np.unique(X_full_median[0]).size == n_clusters

     assert_array_almost_equal(agglo_mean.transform(X_full_mean),
                               Xt_mean)
16 changes: 8 additions & 8 deletions sklearn/cluster/tests/test_hierarchical.py
@@ -72,7 +72,7 @@ def test_structured_linkage_tree():
         children, n_components, n_leaves, parent = \
             tree_builder(X.T, connectivity)
         n_nodes = 2 * X.shape[1] - 1
-        assert_true(len(children) + n_leaves == n_nodes)
+        assert len(children) + n_leaves == n_nodes
         # Check that ward_tree raises a ValueError with a connectivity matrix
         # of the wrong shape
         assert_raises(ValueError,
@@ -114,7 +114,7 @@ def test_height_linkage_tree():
     for linkage_func in _TREE_BUILDERS.values():
         children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
         n_nodes = 2 * X.shape[1] - 1
-        assert_true(len(children) + n_leaves == n_nodes)
+        assert len(children) + n_leaves == n_nodes


 def test_agglomerative_clustering_wrong_arg_memory():
@@ -152,7 +152,7 @@ def test_agglomerative_clustering():
                 linkage=linkage)
             clustering.fit(X)
             labels = clustering.labels_
-            assert_true(np.size(np.unique(labels)) == 10)
+            assert np.size(np.unique(labels)) == 10
         finally:
             shutil.rmtree(tempdir)
         # Turn caching off now
@@ -166,7 +166,7 @@ def test_agglomerative_clustering():
                                                      labels), 1)
     clustering.connectivity = None
     clustering.fit(X)
-    assert_true(np.size(np.unique(clustering.labels_)) == 10)
+    assert np.size(np.unique(clustering.labels_)) == 10
     # Check that we raise a TypeError on dense matrices
     clustering = AgglomerativeClustering(
         n_clusters=10,
@@ -226,12 +226,12 @@ def test_ward_agglomeration():
     connectivity = grid_to_graph(*mask.shape)
     agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
     agglo.fit(X)
-    assert_true(np.size(np.unique(agglo.labels_)) == 5)
+    assert np.size(np.unique(agglo.labels_)) == 5

     X_red = agglo.transform(X)
-    assert_true(X_red.shape[1] == 5)
+    assert X_red.shape[1] == 5
     X_full = agglo.inverse_transform(X_red)
-    assert_true(np.unique(X_full[0]).size == 5)
+    assert np.unique(X_full[0]).size == 5
     assert_array_almost_equal(agglo.transform(X_full), X_red)

     # Check that fitting with no samples raises a ValueError
@@ -265,7 +265,7 @@ def assess_same_labelling(cut1, cut2):
         ecut = np.zeros((n, k))
         ecut[np.arange(n), cut] = 1
         co_clust.append(np.dot(ecut, ecut.T))
-    assert_true((co_clust[0] == co_clust[1]).all())
+    assert (co_clust[0] == co_clust[1]).all()


 def test_scikit_vs_scipy():
12 changes: 6 additions & 6 deletions sklearn/cluster/tests/test_k_means.py
@@ -107,8 +107,8 @@ def test_labels_assignment_and_inertia():
         labels_gold[dist < mindist] = center_id
         mindist = np.minimum(dist, mindist)
     inertia_gold = mindist.sum()
-    assert_true((mindist >= 0.0).all())
-    assert_true((labels_gold != -1).all())
+    assert (mindist >= 0.0).all()
+    assert (labels_gold != -1).all()

     sample_weight = None

@@ -565,9 +565,9 @@ def test_k_means_non_collapsed():
     assert_equal(len(np.unique(km.labels_)), 3)

     centers = km.cluster_centers_
-    assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
-    assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
-    assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
+    assert np.linalg.norm(centers[0] - centers[1]) >= 0.1
+    assert np.linalg.norm(centers[0] - centers[2]) >= 0.1
+    assert np.linalg.norm(centers[1] - centers[2]) >= 0.1


 @pytest.mark.parametrize('algo', ['full', 'elkan'])
@@ -689,7 +689,7 @@ def test_n_init():
     failure_msg = ("Inertia %r should be decreasing"
                    " when n_init is increasing.") % list(inertia)
     for i in range(len(n_init_range) - 1):
-        assert_true(inertia[i] >= inertia[i + 1], failure_msg)
+        assert inertia[i] >= inertia[i + 1], failure_msg


 def test_k_means_function():
6 changes: 3 additions & 3 deletions sklearn/cluster/tests/test_mean_shift.py
@@ -31,7 +31,7 @@
 def test_estimate_bandwidth():
     # Test estimate_bandwidth
     bandwidth = estimate_bandwidth(X, n_samples=200)
-    assert_true(0.9 <= bandwidth <= 1.5)
+    assert 0.9 <= bandwidth <= 1.5


 def test_estimate_bandwidth_1sample():
@@ -125,14 +125,14 @@ def test_bin_seeds():
     ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
     test_bins = get_bin_seeds(X, 1, 1)
     test_result = set([tuple(p) for p in test_bins])
-    assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
+    assert len(ground_truth.symmetric_difference(test_result)) == 0

     # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
     # found
     ground_truth = set([(1., 1.), (2., 1.)])
     test_bins = get_bin_seeds(X, 1, 2)
     test_result = set([tuple(p) for p in test_bins])
-    assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
+    assert len(ground_truth.symmetric_difference(test_result)) == 0

     # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
     # we bail and use the whole data here.
22 changes: 11 additions & 11 deletions sklearn/compose/tests/test_column_transformer.py
@@ -227,7 +227,7 @@ def fit(self, X, y=None):
             return self

         def transform(self, X, y=None):
-            assert_true(isinstance(X, (pd.DataFrame, pd.Series)))
+            assert isinstance(X, (pd.DataFrame, pd.Series))
             if isinstance(X, pd.Series):
                 X = X.to_frame()
             return X
@@ -309,15 +309,15 @@ def test_column_transformer_sparse_array():
             ct = ColumnTransformer([('trans', Trans(), col)],
                                    remainder=remainder,
                                    sparse_threshold=0.8)
-            assert_true(sparse.issparse(ct.fit_transform(X_sparse)))
+            assert sparse.issparse(ct.fit_transform(X_sparse))
             assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
             assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
                                          res)

     for col in [[0, 1], slice(0, 2)]:
         ct = ColumnTransformer([('trans', Trans(), col)],
                                sparse_threshold=0.8)
-        assert_true(sparse.issparse(ct.fit_transform(X_sparse)))
+        assert sparse.issparse(ct.fit_transform(X_sparse))
         assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
         assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse),
                                      X_res_both)
@@ -352,7 +352,7 @@ def test_column_transformer_sparse_stacking():
                                   sparse_threshold=0.8)
     col_trans.fit(X_array)
     X_trans = col_trans.transform(X_array)
-    assert_true(sparse.issparse(X_trans))
+    assert sparse.issparse(X_trans)
     assert_equal(X_trans.shape, (X_trans.shape[0], X_trans.shape[0] + 1))
     assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
     assert len(col_trans.transformers_) == 2
@@ -597,11 +597,11 @@ def test_column_transformer_named_estimators():
                             ('trans2', StandardScaler(with_std=False), [1])])
     assert_false(hasattr(ct, 'transformers_'))
     ct.fit(X_array)
-    assert_true(hasattr(ct, 'transformers_'))
-    assert_true(isinstance(ct.named_transformers_['trans1'], StandardScaler))
-    assert_true(isinstance(ct.named_transformers_.trans1, StandardScaler))
-    assert_true(isinstance(ct.named_transformers_['trans2'], StandardScaler))
-    assert_true(isinstance(ct.named_transformers_.trans2, StandardScaler))
+    assert hasattr(ct, 'transformers_')
+    assert isinstance(ct.named_transformers_['trans1'], StandardScaler)
+    assert isinstance(ct.named_transformers_.trans1, StandardScaler)
+    assert isinstance(ct.named_transformers_['trans2'], StandardScaler)
+    assert isinstance(ct.named_transformers_.trans2, StandardScaler)
     assert_false(ct.named_transformers_.trans2.with_std)
     # check that they are fitted transformers
     assert_equal(ct.named_transformers_.trans1.mean_, 1.)
@@ -613,12 +613,12 @@ def test_column_transformer_cloning():
     ct = ColumnTransformer([('trans', StandardScaler(), [0])])
     ct.fit(X_array)
     assert_false(hasattr(ct.transformers[0][1], 'mean_'))
-    assert_true(hasattr(ct.transformers_[0][1], 'mean_'))
+    assert hasattr(ct.transformers_[0][1], 'mean_')

     ct = ColumnTransformer([('trans', StandardScaler(), [0])])
     ct.fit_transform(X_array)
     assert_false(hasattr(ct.transformers[0][1], 'mean_'))
-    assert_true(hasattr(ct.transformers_[0][1], 'mean_'))
+    assert hasattr(ct.transformers_[0][1], 'mean_')


 def test_column_transformer_get_feature_names():
2 changes: 1 addition & 1 deletion sklearn/cross_decomposition/tests/test_pls.py
@@ -317,7 +317,7 @@ def test_predict_transform_copy():
     assert_array_equal(X_copy, X)
     assert_array_equal(Y_copy, Y)
     # also check that mean wasn't zero before (to make sure we didn't touch it)
-    assert_true(np.all(X.mean(axis=0) != 0))
+    assert np.all(X.mean(axis=0) != 0)


 def test_scale_and_stability():
6 changes: 3 additions & 3 deletions sklearn/datasets/tests/test_20news.py
@@ -67,14 +67,14 @@ def test_20news_vectorized():

     # test subset = train
     bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
-    assert_true(sp.isspmatrix_csr(bunch.data))
+    assert sp.isspmatrix_csr(bunch.data)
     assert_equal(bunch.data.shape, (11314, 130107))
     assert_equal(bunch.target.shape[0], 11314)
     assert_equal(bunch.data.dtype, np.float64)

     # test subset = test
     bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
-    assert_true(sp.isspmatrix_csr(bunch.data))
+    assert sp.isspmatrix_csr(bunch.data)
     assert_equal(bunch.data.shape, (7532, 130107))
     assert_equal(bunch.target.shape[0], 7532)
     assert_equal(bunch.data.dtype, np.float64)
@@ -85,7 +85,7 @@ def test_20news_vectorized():

     # test subset = all
     bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
-    assert_true(sp.isspmatrix_csr(bunch.data))
+    assert sp.isspmatrix_csr(bunch.data)
     assert_equal(bunch.data.shape, (11314 + 7532, 130107))
     assert_equal(bunch.target.shape[0], 11314 + 7532)
     assert_equal(bunch.data.dtype, np.float64)
32 changes: 16 additions & 16 deletions sklearn/datasets/tests/test_base.py
@@ -74,15 +74,15 @@ def test_data_home(data_home):
     # get_data_home will point to a pre-existing folder
     data_home = get_data_home(data_home=data_home)
     assert_equal(data_home, data_home)
-    assert_true(os.path.exists(data_home))
+    assert os.path.exists(data_home)

     # clear_data_home will delete both the content and the folder itself
     clear_data_home(data_home=data_home)
     assert_false(os.path.exists(data_home))

     # if the folder is missing it will be created again
     data_home = get_data_home(data_home=data_home)
-    assert_true(os.path.exists(data_home))
+    assert os.path.exists(data_home)


 def test_default_empty_load_files(load_files_root):
@@ -126,7 +126,7 @@ def test_load_sample_images():
         res = load_sample_images()
         assert_equal(len(res.images), 2)
         assert_equal(len(res.filenames), 2)
-        assert_true(res.DESCR)
+        assert res.DESCR
     except ImportError:
         warnings.warn("Could not load sample images, PIL is not available.")

@@ -166,9 +166,9 @@ def test_load_missing_sample_image_error():
 def test_load_diabetes():
     res = load_diabetes()
     assert_equal(res.data.shape, (442, 10))
-    assert_true(res.target.size, 442)
+    assert res.target.size, 442
     assert_equal(len(res.feature_names), 10)
-    assert_true(res.DESCR)
+    assert res.DESCR

     # test return_X_y option
     check_return_X_y(res, partial(load_diabetes))
@@ -179,9 +179,9 @@ def test_load_linnerud():
     assert_equal(res.data.shape, (20, 3))
     assert_equal(res.target.shape, (20, 3))
     assert_equal(len(res.target_names), 3)
-    assert_true(res.DESCR)
-    assert_true(os.path.exists(res.data_filename))
-    assert_true(os.path.exists(res.target_filename))
+    assert res.DESCR
+    assert os.path.exists(res.data_filename)
+    assert os.path.exists(res.target_filename)

     # test return_X_y option
     check_return_X_y(res, partial(load_linnerud))
@@ -192,8 +192,8 @@ def test_load_iris():
     assert_equal(res.data.shape, (150, 4))
     assert_equal(res.target.size, 150)
     assert_equal(res.target_names.size, 3)
-    assert_true(res.DESCR)
-    assert_true(os.path.exists(res.filename))
+    assert res.DESCR
+    assert os.path.exists(res.filename)

     # test return_X_y option
     check_return_X_y(res, partial(load_iris))
@@ -204,7 +204,7 @@ def test_load_wine():
     assert_equal(res.data.shape, (178, 13))
     assert_equal(res.target.size, 178)
     assert_equal(res.target_names.size, 3)
-    assert_true(res.DESCR)
+    assert res.DESCR

     # test return_X_y option
     check_return_X_y(res, partial(load_wine))
@@ -215,8 +215,8 @@ def test_load_breast_cancer():
     assert_equal(res.data.shape, (569, 30))
     assert_equal(res.target.size, 569)
     assert_equal(res.target_names.size, 2)
-    assert_true(res.DESCR)
-    assert_true(os.path.exists(res.filename))
+    assert res.DESCR
+    assert os.path.exists(res.filename)

     # test return_X_y option
     check_return_X_y(res, partial(load_breast_cancer))
@@ -227,8 +227,8 @@ def test_load_boston():
     assert_equal(res.data.shape, (506, 13))
     assert_equal(res.target.size, 506)
     assert_equal(res.feature_names.size, 13)
-    assert_true(res.DESCR)
-    assert_true(os.path.exists(res.filename))
+    assert res.DESCR
+    assert os.path.exists(res.filename)

     # test return_X_y option
     check_return_X_y(res, partial(load_boston))
@@ -265,4 +265,4 @@ def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
 def test_bunch_dir():
     # check that dir (important for autocomplete) shows attributes
     data = load_iris()
-    assert_true("data" in dir(data))
+    assert "data" in dir(data)
4 changes: 2 additions & 2 deletions sklearn/datasets/tests/test_rcv1.py
@@ -27,8 +27,8 @@ def test_fetch_rcv1():
     cat_list, s1 = data1.target_names.tolist(), data1.sample_id

     # test sparsity
-    assert_true(sp.issparse(X1))
-    assert_true(sp.issparse(Y1))
+    assert sp.issparse(X1)
+    assert sp.issparse(Y1)
     assert_equal(60915113, X1.data.size)
     assert_equal(2606875, Y1.data.size)