diff --git a/sklearn/preprocessing/_data.py b/sklearn/preprocessing/_data.py
index 59c7b68a8d528..8df4a291555ff 100644
--- a/sklearn/preprocessing/_data.py
+++ b/sklearn/preprocessing/_data.py
@@ -1292,6 +1292,13 @@ def _more_tags(self):
         return {"allow_nan": True}
 
 
+@validate_params(
+    {
+        "X": ["array-like", "sparse matrix"],
+        "axis": [Options(Integral, {0, 1})],
+        "copy": ["boolean"],
+    }
+)
 def maxabs_scale(X, *, axis=0, copy=True):
     """Scale each feature to the [-1, 1] range without breaking the sparsity.
 
@@ -1306,7 +1313,7 @@ def maxabs_scale(X, *, axis=0, copy=True):
     X : {array-like, sparse matrix} of shape (n_samples, n_features)
         The data.
 
-    axis : int, default=0
+    axis : {0, 1}, default=0
         Axis used to scale along. If 0, independently scale each feature,
         otherwise (if 1) scale each sample.
 
diff --git a/sklearn/tests/test_public_functions.py b/sklearn/tests/test_public_functions.py
index f354915deed5d..5b5057bfb97c3 100644
--- a/sklearn/tests/test_public_functions.py
+++ b/sklearn/tests/test_public_functions.py
@@ -223,6 +223,7 @@ def _check_function_param_validation(
     "sklearn.model_selection.train_test_split",
     "sklearn.preprocessing.add_dummy_feature",
     "sklearn.preprocessing.binarize",
+    "sklearn.preprocessing.maxabs_scale",
     "sklearn.preprocessing.scale",
     "sklearn.random_projection.johnson_lindenstrauss_min_dim",
     "sklearn.svm.l1_min_c",