diff --git a/sklearn/metrics/_ranking.py b/sklearn/metrics/_ranking.py
index e3d46f5138fb2..7ec583177328c 100644
--- a/sklearn/metrics/_ranking.py
+++ b/sklearn/metrics/_ranking.py
@@ -1224,6 +1224,13 @@ def coverage_error(y_true, y_score, *, sample_weight=None):
     return np.average(coverage, weights=sample_weight)
 
 
+@validate_params(
+    {
+        "y_true": ["array-like", "sparse matrix"],
+        "y_score": ["array-like"],
+        "sample_weight": ["array-like", None],
+    }
+)
 def label_ranking_loss(y_true, y_score, *, sample_weight=None):
     """Compute Ranking loss measure.
 
@@ -1242,10 +1249,10 @@ def label_ranking_loss(y_true, y_score, *, sample_weight=None):
 
     Parameters
     ----------
-    y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)
+    y_true : {array-like, sparse matrix} of shape (n_samples, n_labels)
         True binary labels in binary indicator format.
 
-    y_score : ndarray of shape (n_samples, n_labels)
+    y_score : array-like of shape (n_samples, n_labels)
         Target scores, can either be probability estimates of the positive
         class, confidence values, or non-thresholded measure of decisions
         (as returned by "decision_function" on some classifiers).
diff --git a/sklearn/tests/test_public_functions.py b/sklearn/tests/test_public_functions.py
index b50e469f80a1e..8729ce1f0869e 100644
--- a/sklearn/tests/test_public_functions.py
+++ b/sklearn/tests/test_public_functions.py
@@ -127,6 +127,7 @@ def _check_function_param_validation(
     "sklearn.metrics.f1_score",
     "sklearn.metrics.hamming_loss",
     "sklearn.metrics.jaccard_score",
+    "sklearn.metrics.label_ranking_loss",
     "sklearn.metrics.log_loss",
     "sklearn.metrics.matthews_corrcoef",
     "sklearn.metrics.max_error",
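
For context, a minimal sketch of the caller-facing behavior this patch enables, assuming a scikit-learn build with the change applied: plain array-likes such as nested lists are accepted (matching the updated docstring types), and an out-of-contract argument is rejected up front with `InvalidParameterError` (a `ValueError`/`TypeError` subclass) rather than failing somewhere inside the metric. The valid inputs below are the function's own docstring example; the scalar misuse is a hypothetical illustration.

```python
# Minimal sketch, assuming a scikit-learn build with this patch applied.
from sklearn.metrics import label_ranking_loss

# Array-like inputs (plain nested lists), per the updated docstring types.
y_true = [[1, 0, 0], [0, 0, 1]]
y_score = [[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]]
print(label_ranking_loss(y_true, y_score))  # 0.75

# With @validate_params, a non-array-like y_true is rejected before the
# metric runs; InvalidParameterError subclasses ValueError, so this catches it.
try:
    label_ranking_loss(42, y_score)
except ValueError as exc:
    print(type(exc).__name__)  # InvalidParameterError
```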