
Commit e95815a

raghavrv authored and amueller committed
MAINT docstring --> comments to prevent nose from using doc in verbose mode
1 parent b278440 commit e95815a
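
Why this matters: when nose runs in verbose mode (nosetests -v), it reports each test by the first line of its docstring when one is present, which hides the actual test function name in the output. Moving the one-line descriptions into comments keeps them in the source while letting verbose runs print the test names. A minimal sketch of the behaviour, using hypothetical test names that are not part of this commit:

def test_with_docstring():
    """Affinity Propagation algorithm """
    pass
# nosetests -v would typically report this test as:
#   Affinity Propagation algorithm ... ok

def test_with_comment():
    # Affinity Propagation algorithm
    pass
# nosetests -v reports the qualified function name instead, e.g.:
#   test_module.test_with_comment ... ok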

83 files changed: +899 additions, -1233 deletions (only a subset of the changed files is shown below).


sklearn/cluster/tests/test_affinity_propagation.py

Lines changed: 3 additions & 3 deletions
@@ -21,7 +21,7 @@


 def test_affinity_propagation():
-    """Affinity Propagation algorithm """
+    # Affinity Propagation algorithm
     # Compute similarities
     S = -euclidean_distances(X, squared=True)
     preference = np.median(S) * 10
@@ -60,15 +60,15 @@ def test_affinity_propagation():


 def test_affinity_propagation_predict():
-    """Test AffinityPropagation.predict"""
+    # Test AffinityPropagation.predict
     af = AffinityPropagation(affinity="euclidean")
     labels = af.fit_predict(X)
     labels2 = af.predict(X)
     assert_array_equal(labels, labels2)


 def test_affinity_propagation_predict_error():
-    """Test exception in AffinityPropagation.predict"""
+    # Test exception in AffinityPropagation.predict
     # Not fitted.
     af = AffinityPropagation(affinity="euclidean")
     assert_raises(ValueError, af.predict, X)

sklearn/cluster/tests/test_bicluster.py

Lines changed: 3 additions & 3 deletions
@@ -55,7 +55,7 @@ def test_get_submatrix():


 def _test_shape_indices(model):
-    """Test get_shape and get_indices on fitted model."""
+    # Test get_shape and get_indices on fitted model.
     for i in range(model.n_clusters):
         m, n = model.get_shape(i)
         i_ind, j_ind = model.get_indices(i)
@@ -64,7 +64,7 @@ def _test_shape_indices(model):


 def test_spectral_coclustering():
-    """Test Dhillon's Spectral CoClustering on a simple problem."""
+    # Test Dhillon's Spectral CoClustering on a simple problem.
     param_grid = {'svd_method': ['randomized', 'arpack'],
                   'n_svd_vecs': [None, 20],
                   'mini_batch': [False, True],
@@ -93,7 +93,7 @@ def test_spectral_coclustering():


 def test_spectral_biclustering():
-    """Test Kluger methods on a checkerboard dataset."""
+    # Test Kluger methods on a checkerboard dataset.
     S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
                                       random_state=0)


sklearn/cluster/tests/test_birch.py

Lines changed: 7 additions & 7 deletions
@@ -22,7 +22,7 @@


 def test_n_samples_leaves_roots():
-    """Sanity check for the number of samples in leaves and roots"""
+    # Sanity check for the number of samples in leaves and roots
     X, y = make_blobs(n_samples=10)
     brc = Birch()
     brc.fit(X)
@@ -34,7 +34,7 @@ def test_n_samples_leaves_roots():


 def test_partial_fit():
-    """Test that fit is equivalent to calling partial_fit multiple times"""
+    # Test that fit is equivalent to calling partial_fit multiple times
     X, y = make_blobs(n_samples=100)
     brc = Birch(n_clusters=3)
     brc.fit(X)
@@ -52,7 +52,7 @@ def test_partial_fit():


 def test_birch_predict():
-    """Test the predict method predicts the nearest centroid."""
+    # Test the predict method predicts the nearest centroid.
     rng = np.random.RandomState(0)
     X = generate_clustered_data(n_clusters=3, n_features=3,
                                 n_samples_per_cluster=10)
@@ -70,7 +70,7 @@ def test_birch_predict():


 def test_n_clusters():
-    """Test that n_clusters param works properly"""
+    # Test that n_clusters param works properly
     X, y = make_blobs(n_samples=100, centers=10)
     brc1 = Birch(n_clusters=10)
     brc1.fit(X)
@@ -96,7 +96,7 @@ def test_n_clusters():


 def test_sparse_X():
-    """Test that sparse and dense data give same results"""
+    # Test that sparse and dense data give same results
     X, y = make_blobs(n_samples=100, centers=10)
     brc = Birch(n_clusters=10)
     brc.fit(X)
@@ -119,7 +119,7 @@ def check_branching_factor(node, branching_factor):


 def test_branching_factor():
-    """Test that nodes have at max branching_factor number of subclusters"""
+    # Test that nodes have at max branching_factor number of subclusters
     X, y = make_blobs()
     branching_factor = 9

@@ -149,7 +149,7 @@ def check_threshold(birch_instance, threshold):


 def test_threshold():
-    """Test that the leaf subclusters have a threshold lesser than radius"""
+    # Test that the leaf subclusters have a threshold lesser than radius
     X, y = make_blobs(n_samples=80, centers=4)
     brc = Birch(threshold=0.5, n_clusters=None)
     brc.fit(X)

sklearn/cluster/tests/test_dbscan.py

Lines changed: 6 additions & 6 deletions
@@ -25,7 +25,7 @@


 def test_dbscan_similarity():
-    """Tests the DBSCAN algorithm with a similarity array."""
+    # Tests the DBSCAN algorithm with a similarity array.
     # Parameters chosen specifically for this task.
     eps = 0.15
     min_samples = 10
@@ -48,7 +48,7 @@ def test_dbscan_similarity():


 def test_dbscan_feature():
-    """Tests the DBSCAN algorithm with a feature vector array."""
+    # Tests the DBSCAN algorithm with a feature vector array.
     # Parameters chosen specifically for this task.
     # Different eps to other test, because distance is not normalised.
     eps = 0.8
@@ -91,7 +91,7 @@ def test_dbscan_no_core_samples():


 def test_dbscan_callable():
-    """Tests the DBSCAN algorithm with a callable metric."""
+    # Tests the DBSCAN algorithm with a callable metric.
     # Parameters chosen specifically for this task.
     # Different eps to other test, because distance is not normalised.
     eps = 0.8
@@ -117,7 +117,7 @@ def test_dbscan_callable():


 def test_dbscan_balltree():
-    """Tests the DBSCAN algorithm with balltree for neighbor calculation."""
+    # Tests the DBSCAN algorithm with balltree for neighbor calculation.
     eps = 0.8
     min_samples = 10

@@ -156,13 +156,13 @@ def test_dbscan_balltree():


 def test_input_validation():
-    """DBSCAN.fit should accept a list of lists."""
+    # DBSCAN.fit should accept a list of lists.
     X = [[1., 2.], [3., 4.]]
     DBSCAN().fit(X)  # must not raise exception


 def test_dbscan_badargs():
-    """Test bad argument values: these should all raise ValueErrors"""
+    # Test bad argument values: these should all raise ValueErrors
     assert_raises(ValueError,
                   dbscan,
                   X, eps=-1.0)

sklearn/cluster/tests/test_hierarchical.py

Lines changed: 16 additions & 33 deletions
@@ -61,9 +61,7 @@ def test_linkage_misc():


 def test_structured_linkage_tree():
-    """
-    Check that we obtain the correct solution for structured linkage trees.
-    """
+    # Check that we obtain the correct solution for structured linkage trees.
     rng = np.random.RandomState(0)
     mask = np.ones([10, 10], dtype=np.bool)
     # Avoiding a mask with only 'True' entries
@@ -85,9 +83,7 @@ def test_structured_linkage_tree():


 def test_unstructured_linkage_tree():
-    """
-    Check that we obtain the correct solution for unstructured linkage trees.
-    """
+    # Check that we obtain the correct solution for unstructured linkage trees.
     rng = np.random.RandomState(0)
     X = rng.randn(50, 100)
     for this_X in (X, X[0]):
@@ -110,9 +106,7 @@ def test_unstructured_linkage_tree():


 def test_height_linkage_tree():
-    """
-    Check that the height of the results of linkage tree is sorted.
-    """
+    # Check that the height of the results of linkage tree is sorted.
     rng = np.random.RandomState(0)
     mask = np.ones([10, 10], dtype=np.bool)
     X = rng.randn(50, 100)
@@ -124,10 +118,8 @@ def test_height_linkage_tree():


 def test_agglomerative_clustering():
-    """
-    Check that we obtain the correct number of clusters with
-    agglomerative clustering.
-    """
+    # Check that we obtain the correct number of clusters with
+    # agglomerative clustering.
     rng = np.random.RandomState(0)
     mask = np.ones([10, 10], dtype=np.bool)
     n_samples = 100
@@ -214,9 +206,7 @@ def test_agglomerative_clustering():


 def test_ward_agglomeration():
-    """
-    Check that we obtain the correct solution in a simplistic case
-    """
+    # Check that we obtain the correct solution in a simplistic case
     rng = np.random.RandomState(0)
     mask = np.ones([10, 10], dtype=np.bool)
     X = rng.randn(50, 100)
@@ -254,8 +244,7 @@ def assess_same_labelling(cut1, cut2):


 def test_scikit_vs_scipy():
-    """Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
-    """
+    # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
     n, p, k = 10, 5, 3
     rng = np.random.RandomState(0)

@@ -282,10 +271,8 @@ def test_scikit_vs_scipy():


 def test_connectivity_propagation():
-    """
-    Check that connectivity in the ward tree is propagated correctly during
-    merging.
-    """
+    # Check that connectivity in the ward tree is propagated correctly during
+    # merging.
     X = np.array([(.014, .120), (.014, .099), (.014, .097),
                   (.017, .153), (.017, .153), (.018, .153),
                   (.018, .153), (.018, .153), (.018, .153),
@@ -300,10 +287,8 @@ def test_connectivity_propagation():


 def test_ward_tree_children_order():
-    """
-    Check that children are ordered in the same way for both structured and
-    unstructured versions of ward_tree.
-    """
+    # Check that children are ordered in the same way for both structured and
+    # unstructured versions of ward_tree.

     # test on five random datasets
     n, p = 10, 5
@@ -322,7 +307,7 @@ def test_ward_tree_children_order():


 def test_ward_linkage_tree_return_distance():
-    """Test return_distance option on linkage and ward trees"""
+    # Test return_distance option on linkage and ward trees

     # test that return_distance when set true, gives same
     # output on both structured and unstructured clustering.
@@ -429,10 +414,8 @@ def test_ward_linkage_tree_return_distance():


 def test_connectivity_fixing_non_lil():
-    """
-    Check non regression of a bug if a non item assignable connectivity is
-    provided with more than one component.
-    """
+    # Check non regression of a bug if a non item assignable connectivity is
+    # provided with more than one component.
     # create dummy data
     x = np.array([[0, 0], [1, 1]])
     # create a mask with several components to force connectivity fixing
@@ -484,7 +467,7 @@ def test_connectivity_ignores_diagonal():


 def test_compute_full_tree():
-    """Test that the full tree is computed if n_clusters is small"""
+    # Test that the full tree is computed if n_clusters is small
     rng = np.random.RandomState(0)
     X = rng.randn(10, 2)
     connectivity = kneighbors_graph(X, 5, include_self=False)
@@ -511,7 +494,7 @@ def test_compute_full_tree():


 def test_n_components():
-    """Test n_components returned by linkage, average and ward tree"""
+    # Test n_components returned by linkage, average and ward tree
     rng = np.random.RandomState(0)
     X = rng.rand(5, 5)

sklearn/cluster/tests/test_k_means.py

Lines changed: 8 additions & 10 deletions
@@ -81,7 +81,7 @@ def test_labels_assignment_and_inertia():


 def test_minibatch_update_consistency():
-    """Check that dense and sparse minibatch update give the same results"""
+    # Check that dense and sparse minibatch update give the same results
     rng = np.random.RandomState(42)
     old_centers = centers + rng.normal(size=centers.shape)

@@ -480,7 +480,7 @@ def test_mini_match_k_means_invalid_init():


 def test_k_means_copyx():
-    """Check if copy_x=False returns nearly equal X after de-centering."""
+    # Check if copy_x=False returns nearly equal X after de-centering.
     my_X = X.copy()
     km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
     km.fit(my_X)
@@ -491,13 +491,11 @@ def test_k_means_copyx():


 def test_k_means_non_collapsed():
-    """Check k_means with a bad initialization does not yield a singleton
-
-    Starting with bad centers that are quickly ignored should not
-    result in a repositioning of the centers to the center of mass that
-    would lead to collapsed centers which in turns make the clustering
-    dependent of the numerical unstabilities.
-    """
+    # Check k_means with a bad initialization does not yield a singleton
+    # Starting with bad centers that are quickly ignored should not
+    # result in a repositioning of the centers to the center of mass that
+    # would lead to collapsed centers which in turns make the clustering
+    # dependent of the numerical unstabilities.
     my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
     array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
     km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
@@ -630,7 +628,7 @@ def test_fit_transform():


 def test_n_init():
-    """Check that increasing the number of init increases the quality"""
+    # Check that increasing the number of init increases the quality
     n_runs = 5
     n_init_range = [1, 5, 10]
     inertia = np.zeros((len(n_init_range), n_runs))

sklearn/cluster/tests/test_mean_shift.py

Lines changed: 6 additions & 8 deletions
@@ -25,13 +25,13 @@


 def test_estimate_bandwidth():
-    """Test estimate_bandwidth"""
+    # Test estimate_bandwidth
     bandwidth = estimate_bandwidth(X, n_samples=200)
     assert_true(0.9 <= bandwidth <= 1.5)


 def test_mean_shift():
-    """ Test MeanShift algorithm """
+    # Test MeanShift algorithm
     bandwidth = 1.2

     ms = MeanShift(bandwidth=bandwidth)
@@ -47,7 +47,7 @@ def test_mean_shift():


 def test_meanshift_predict():
-    """Test MeanShift.predict"""
+    # Test MeanShift.predict
     ms = MeanShift(bandwidth=1.2)
     labels = ms.fit_predict(X)
     labels2 = ms.predict(X)
@@ -62,17 +62,15 @@ def test_meanshift_all_orphans():


 def test_unfitted():
-    """Non-regression: before fit, there should be not fitted attributes."""
+    # Non-regression: before fit, there should be not fitted attributes.
     ms = MeanShift()
     assert_false(hasattr(ms, "cluster_centers_"))
     assert_false(hasattr(ms, "labels_"))


 def test_bin_seeds():
-    """
-    Test the bin seeding technique which can be used in the mean shift
-    algorithm
-    """
+    # Test the bin seeding technique which can be used in the mean shift
+    # algorithm
     # Data is just 6 points in the plane
     X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
                   [2., 1.], [2.1, 1.1], [0., 0.]])
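
The mechanical pattern is identical across all 83 files: the leading docstring of a test function becomes a comment with the same text and indentation. A rough, hypothetical helper for the one-line case is sketched below; it is not the script used for this commit, and multi-line docstrings (as in test_hierarchical.py above) would still need manual attention:

import re
import sys

# Match a test "def" line followed by a one-line triple-quoted docstring.
ONE_LINE_DOCSTRING = re.compile(
    r'^(?P<def>[ \t]*def _?test_\w+\(.*\):\n)'
    r'(?P<indent>[ \t]+)"""[ \t]*(?P<text>.+?)[ \t]*"""[ \t]*\n',
    re.MULTILINE)


def docstrings_to_comments(source):
    # Rewrite each matched docstring as a comment with the same indentation.
    return ONE_LINE_DOCSTRING.sub(
        lambda m: '%s%s# %s\n' % (m.group('def'), m.group('indent'), m.group('text')),
        source)


if __name__ == '__main__':
    # Usage (hypothetical): python docstrings_to_comments.py sklearn/cluster/tests/*.py
    for path in sys.argv[1:]:
        with open(path) as f:
            src = f.read()
        with open(path, 'w') as f:
            f.write(docstrings_to_comments(src))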
