diff --git a/sklearn/preprocessing/_data.py b/sklearn/preprocessing/_data.py
index d72b0294fa4f4..03a999ffba49e 100644
--- a/sklearn/preprocessing/_data.py
+++ b/sklearn/preprocessing/_data.py
@@ -120,6 +120,15 @@ def _handle_zeros_in_scale(scale, copy=True, constant_mask=None):
     return scale
 
 
+@validate_params(
+    {
+        "X": ["array-like", "sparse matrix"],
+        "axis": [Options(Integral, {0, 1})],
+        "with_mean": ["boolean"],
+        "with_std": ["boolean"],
+        "copy": ["boolean"],
+    }
+)
 def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
     """Standardize a dataset along any axis.
 
@@ -132,7 +141,7 @@ def scale(X, *, axis=0, with_mean=True, with_std=True, copy=True):
     X : {array-like, sparse matrix} of shape (n_samples, n_features)
         The data to center and scale.
 
-    axis : int, default=0
+    axis : {0, 1}, default=0
         Axis used to compute the means and standard deviations along. If 0,
         independently standardize each feature, otherwise (if 1) standardize
         each sample.
diff --git a/sklearn/tests/test_public_functions.py b/sklearn/tests/test_public_functions.py
index e127369072828..9581de7630788 100644
--- a/sklearn/tests/test_public_functions.py
+++ b/sklearn/tests/test_public_functions.py
@@ -210,6 +210,7 @@ def _check_function_param_validation(
     "sklearn.metrics.top_k_accuracy_score",
     "sklearn.metrics.zero_one_loss",
     "sklearn.model_selection.train_test_split",
+    "sklearn.preprocessing.scale",
     "sklearn.random_projection.johnson_lindenstrauss_min_dim",
     "sklearn.svm.l1_min_c",
     "sklearn.tree.export_text",
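
Usage note (not part of the patch): a minimal sketch of the behavior this change enables, assuming a scikit-learn build that includes it. With `@validate_params` applied, `sklearn.preprocessing.scale` rejects out-of-range arguments such as `axis=2` before doing any work, using the same parameter-validation machinery as other decorated public functions. The exact exception class (`InvalidParameterError`) is an internal detail that subclasses `ValueError`, so the sketch only catches `ValueError`.

```python
# Sketch of the validation behavior added by this patch; assumes a scikit-learn
# version where sklearn.preprocessing.scale is decorated with @validate_params.
import numpy as np
from sklearn.preprocessing import scale

X = np.array([[1.0, 2.0], [3.0, 4.0]])

# Valid call: axis is constrained to {0, 1} by the new Options(Integral, {0, 1}).
print(scale(X, axis=0))

# Invalid call: axis=2 is now rejected by the decorator with a consistent message.
# The validation error subclasses ValueError, so that is what we catch here.
try:
    scale(X, axis=2)
except ValueError as exc:
    print(f"rejected: {exc}")
```

The added entry in `test_public_functions.py` registers `sklearn.preprocessing.scale` in the common parameter-validation test, which checks that each declared constraint actually raises on invalid input.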