diff --git a/sklearn/metrics/_ranking.py b/sklearn/metrics/_ranking.py
index 0d201bf99bc10..c506953076bef 100644
--- a/sklearn/metrics/_ranking.py
+++ b/sklearn/metrics/_ranking.py
@@ -1058,6 +1058,7 @@ def label_ranking_average_precision_score(y_true, y_score, *, sample_weight=None
     Returns
     -------
     score : float
+        Ranking-based average precision score.
 
     Examples
     --------
@@ -1067,7 +1068,6 @@ def label_ranking_average_precision_score(y_true, y_score, *, sample_weight=None
     >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
     >>> label_ranking_average_precision_score(y_true, y_score)
     0.416...
-
     """
     check_consistent_length(y_true, y_score, sample_weight)
     y_true = check_array(y_true, ensure_2d=False)
diff --git a/sklearn/tests/test_docstrings.py b/sklearn/tests/test_docstrings.py
index d1cba95083e73..72f98765a5eb0 100644
--- a/sklearn/tests/test_docstrings.py
+++ b/sklearn/tests/test_docstrings.py
@@ -40,7 +40,6 @@
     "sklearn.metrics._plot.precision_recall_curve.plot_precision_recall_curve",
     "sklearn.metrics._ranking.coverage_error",
     "sklearn.metrics._ranking.dcg_score",
-    "sklearn.metrics._ranking.label_ranking_average_precision_score",
     "sklearn.metrics._ranking.roc_auc_score",
     "sklearn.metrics._ranking.roc_curve",
     "sklearn.metrics._ranking.top_k_accuracy_score",
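
As a quick sanity check of the doctest kept in the patched docstring, the example can be run standalone; this is just a sketch reusing the inputs shown in the diff above, and the expected value matches the `0.416...` doctest output.

```python
import numpy as np
from sklearn.metrics import label_ranking_average_precision_score

# Same inputs as the doctest in the patched docstring: two samples,
# three labels, with y_score holding per-label decision values.
y_true = np.array([[1, 0, 0], [0, 0, 1]])
y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])

# The ranking-based average precision score now documented under "Returns".
print(label_ranking_average_precision_score(y_true, y_score))  # ~0.4166
```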