From 8538305afbf31e6a92ed70c6029f0d6078833e14 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Rodrigues?=
Date: Wed, 26 Mar 2025 12:06:57 +0000
Subject: [PATCH 1/3] Fixes #30400: update index finding with np.where(condition)

Changed np.where(condition) to condition.nonzero() in multiple examples.
---
 examples/applications/plot_species_distribution_modeling.py | 2 +-
 examples/applications/plot_stock_market.py | 2 +-
 examples/bicluster/plot_bicluster_newsgroups.py | 2 +-
 examples/cluster/plot_hdbscan.py | 2 +-
 examples/decomposition/plot_sparse_coding.py | 2 +-
 examples/ensemble/plot_adaboost_twoclass.py | 2 +-
 examples/linear_model/plot_sgd_iris.py | 2 +-
 examples/manifold/plot_mds.py | 2 +-
 examples/miscellaneous/plot_multilabel.py | 4 ++--
 .../plot_label_propagation_digits_active_learning.py | 2 +-
 examples/semi_supervised/plot_label_propagation_structure.py | 4 ++--
 examples/svm/plot_linearsvc_support_vectors.py | 2 +-
 examples/tree/plot_iris_dtc.py | 2 +-
 13 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/examples/applications/plot_species_distribution_modeling.py b/examples/applications/plot_species_distribution_modeling.py
index 5b0d30bc4c8bf..dc3bd7591a11a 100644
--- a/examples/applications/plot_species_distribution_modeling.py
+++ b/examples/applications/plot_species_distribution_modeling.py
@@ -194,7 +194,7 @@ def plot_species_distribution(
     Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
 
     # We'll predict only for the land points.
-    idx = np.where(land_reference > -9999)
+    idx = (land_reference > -9999).nonzero()
     coverages_land = data.coverages[:, idx[0], idx[1]].T
 
     pred = clf.decision_function((coverages_land - mean) / std)
diff --git a/examples/applications/plot_stock_market.py b/examples/applications/plot_stock_market.py
index 74f60ffa00c15..40f778c785723 100644
--- a/examples/applications/plot_stock_market.py
+++ b/examples/applications/plot_stock_market.py
@@ -213,7 +213,7 @@
 )
 
 # Plot the edges
-start_idx, end_idx = np.where(non_zero)
+start_idx, end_idx = non_zero.nonzero()
 # a sequence of (*line0*, *line1*, *line2*), where::
 #     linen = (x0, y0), (x1, y1), ... (xm, ym)
 segments = [
diff --git a/examples/bicluster/plot_bicluster_newsgroups.py b/examples/bicluster/plot_bicluster_newsgroups.py
index aed7037086168..054fb0ba399e1 100644
--- a/examples/bicluster/plot_bicluster_newsgroups.py
+++ b/examples/bicluster/plot_bicluster_newsgroups.py
@@ -147,7 +147,7 @@ def bicluster_ncut(i):
 
     # words
    out_of_cluster_docs = cocluster.row_labels_ != cluster
-    out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
+    out_of_cluster_docs = out_of_cluster_docs.nonzero()[0]
     word_col = X[:, cluster_words]
     word_scores = np.array(
         word_col[cluster_docs, :].sum(axis=0)
diff --git a/examples/cluster/plot_hdbscan.py b/examples/cluster/plot_hdbscan.py
index 64d4936694bf3..eee221d578ca3 100644
--- a/examples/cluster/plot_hdbscan.py
+++ b/examples/cluster/plot_hdbscan.py
@@ -40,7 +40,7 @@ def plot(X, labels, probabilities=None, parameters=None, ground_truth=False, ax=
             # Black used for noise.
             col = [0, 0, 0, 1]
 
-        class_index = np.where(labels == k)[0]
+        class_index = (labels == k).nonzero()[0]
         for ci in class_index:
             ax.plot(
                 X[ci, 0],
diff --git a/examples/decomposition/plot_sparse_coding.py b/examples/decomposition/plot_sparse_coding.py
index 778f718c2ac87..a3456b553486c 100644
--- a/examples/decomposition/plot_sparse_coding.py
+++ b/examples/decomposition/plot_sparse_coding.py
@@ -106,7 +106,7 @@ def ricker_matrix(width, resolution, n_components):
         dictionary=D, transform_algorithm="threshold", transform_alpha=20
     )
     x = coder.transform(y.reshape(1, -1))
-    _, idx = np.where(x != 0)
+    _, idx = (x != 0).nonzero()
     x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y, rcond=None)
     x = np.ravel(np.dot(x, D))
     squared_error = np.sum((y - x) ** 2)
diff --git a/examples/ensemble/plot_adaboost_twoclass.py b/examples/ensemble/plot_adaboost_twoclass.py
index c499a9f6dc44b..18a2a10841c1c 100644
--- a/examples/ensemble/plot_adaboost_twoclass.py
+++ b/examples/ensemble/plot_adaboost_twoclass.py
@@ -65,7 +65,7 @@
 
 # Plot the training points
 for i, n, c in zip(range(2), class_names, plot_colors):
-    idx = np.where(y == i)
+    idx = (y == i).nonzero()
     plt.scatter(
         X[idx, 0],
         X[idx, 1],
diff --git a/examples/linear_model/plot_sgd_iris.py b/examples/linear_model/plot_sgd_iris.py
index 46dc2e7c31cd1..e8aaf3a2e13a2 100644
--- a/examples/linear_model/plot_sgd_iris.py
+++ b/examples/linear_model/plot_sgd_iris.py
@@ -55,7 +55,7 @@
 
 # Plot also the training points
 for i, color in zip(clf.classes_, colors):
-    idx = np.where(y == i)
+    idx = (y == i).nonzero()
     plt.scatter(
         X[idx, 0],
         X[idx, 1],
diff --git a/examples/manifold/plot_mds.py b/examples/manifold/plot_mds.py
index afea676b245a8..d35423ad51367 100644
--- a/examples/manifold/plot_mds.py
+++ b/examples/manifold/plot_mds.py
@@ -89,7 +89,7 @@
 plt.legend(scatterpoints=1, loc="best", shadow=False)
 
 # Plot the edges
-start_idx, end_idx = np.where(X_mds)
+start_idx, end_idx = X_mds.nonzero()
 # a sequence of (*line0*, *line1*, *line2*), where::
 #     linen = (x0, y0), (x1, y1), ... (xm, ym)
 segments = [
diff --git a/examples/miscellaneous/plot_multilabel.py b/examples/miscellaneous/plot_multilabel.py
index 9d08ad3fa7907..4c88dbe1838f2 100644
--- a/examples/miscellaneous/plot_multilabel.py
+++ b/examples/miscellaneous/plot_multilabel.py
@@ -71,8 +71,8 @@ def plot_subfigure(X, Y, subplot, title, transform):
     plt.subplot(2, 2, subplot)
     plt.title(title)
 
-    zero_class = np.where(Y[:, 0])
-    one_class = np.where(Y[:, 1])
+    zero_class = (Y[:, 0]).nonzero()
+    one_class = (Y[:, 1]).nonzero()
     plt.scatter(X[:, 0], X[:, 1], s=40, c="gray", edgecolors=(0, 0, 0))
     plt.scatter(
         X[zero_class, 0],
diff --git a/examples/semi_supervised/plot_label_propagation_digits_active_learning.py b/examples/semi_supervised/plot_label_propagation_digits_active_learning.py
index 1e03f528acdb8..36183a8f6bfe5 100644
--- a/examples/semi_supervised/plot_label_propagation_digits_active_learning.py
+++ b/examples/semi_supervised/plot_label_propagation_digits_active_learning.py
@@ -108,7 +108,7 @@
         sub.axis("off")
 
         # labeling 5 points, remote from labeled set
-        (delete_index,) = np.where(unlabeled_indices == image_index)
+        (delete_index,) = (unlabeled_indices == image_index).nonzero()
         delete_indices = np.concatenate((delete_indices, delete_index))
 
     unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
diff --git a/examples/semi_supervised/plot_label_propagation_structure.py b/examples/semi_supervised/plot_label_propagation_structure.py
index 8a1798c84edf4..2b44c51923686 100644
--- a/examples/semi_supervised/plot_label_propagation_structure.py
+++ b/examples/semi_supervised/plot_label_propagation_structure.py
@@ -78,8 +78,8 @@
 # when the label was unknown.
 output_labels = label_spread.transduction_
 output_label_array = np.asarray(output_labels)
-outer_numbers = np.where(output_label_array == outer)[0]
-inner_numbers = np.where(output_label_array == inner)[0]
+outer_numbers = (output_label_array == outer).nonzero()[0]
+inner_numbers = (output_label_array == inner).nonzero()[0]
 
 plt.figure(figsize=(4, 4))
 plt.scatter(
diff --git a/examples/svm/plot_linearsvc_support_vectors.py b/examples/svm/plot_linearsvc_support_vectors.py
index 021e1c6b55962..370f826d11a64 100644
--- a/examples/svm/plot_linearsvc_support_vectors.py
+++ b/examples/svm/plot_linearsvc_support_vectors.py
@@ -31,7 +31,7 @@
     # decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0]
     # The support vectors are the samples that lie within the margin
     # boundaries, whose size is conventionally constrained to 1
-    support_vector_indices = np.where(np.abs(decision_function) <= 1 + 1e-15)[0]
+    support_vector_indices = (np.abs(decision_function) <= 1 + 1e-15).nonzero()[0]
     support_vectors = X[support_vector_indices]
 
     plt.subplot(1, 2, i + 1)
diff --git a/examples/tree/plot_iris_dtc.py b/examples/tree/plot_iris_dtc.py
index 9d4298919d515..54c9e85299829 100644
--- a/examples/tree/plot_iris_dtc.py
+++ b/examples/tree/plot_iris_dtc.py
@@ -63,7 +63,7 @@
 
     # Plot the training points
     for i, color in zip(range(n_classes), plot_colors):
-        idx = np.where(y == i)
+        idx = (y == i).nonzero()
         plt.scatter(
             X[idx, 0],
             X[idx, 1],

From 597569da3d2f4cc02d7c7fe77b8bf65476bc1501 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jo=C3=A3o=20Rodrigues?=
Date: Mon, 31 Mar 2025 09:17:38 +0100
Subject: [PATCH 2/3] Update to use of np.asarray in one specific case to fix linting

---
 examples/tree/plot_iris_dtc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/tree/plot_iris_dtc.py b/examples/tree/plot_iris_dtc.py
index 54c9e85299829..349f4a893511e 100644
--- a/examples/tree/plot_iris_dtc.py
+++ b/examples/tree/plot_iris_dtc.py
@@ -63,7 +63,7 @@
 
     # Plot the training points
     for i, color in zip(range(n_classes), plot_colors):
-        idx = (y == i).nonzero()
+        idx = np.asarray(y == i).nonzero()
         plt.scatter(
             X[idx, 0],
             X[idx, 1],

From d8372aabdbcd03f304063758e1a461f58dc95e8e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20du=20Boisberranger?=
Date: Fri, 11 Apr 2025 18:22:45 +0200
Subject: [PATCH 3/3] [doc build]
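
Note on the substitution (illustration only, not part of the patches above): NumPy documents the one-argument call np.where(condition) as shorthand for np.asarray(condition).nonzero(), which is why the replacements in these commits are behavior-preserving. The sketch below uses made-up arrays (y, x), not data from any of the touched examples, to show that the spellings return the same tuple of index arrays.

import numpy as np

# Hypothetical data, for illustration only; not taken from any example above.
y = np.array([0, 1, 2, 1, 0, 1])

idx_where = np.where(y == 1)                # (array([1, 3, 5]),)
idx_nonzero = (y == 1).nonzero()            # same tuple of index arrays
idx_asarray = np.asarray(y == 1).nonzero()  # the spelling used in PATCH 2/3

assert all(
    np.array_equal(a, b) and np.array_equal(a, c)
    for a, b, c in zip(idx_where, idx_nonzero, idx_asarray)
)

# With a 2-D condition the tuple holds one index array per axis, which is
# why plot_sparse_coding.py can unpack it as `_, idx = (x != 0).nonzero()`.
x = np.array([[0.0, 2.0, 0.0, 3.0]])
rows, cols = (x != 0).nonzero()             # rows -> [0, 0], cols -> [1, 3]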