diff --git a/sklearn/metrics/ranking.py b/sklearn/metrics/ranking.py
index 2003ed8b314c8..fde1f1c441125 100644
--- a/sklearn/metrics/ranking.py
+++ b/sklearn/metrics/ranking.py
@@ -116,7 +116,7 @@ def average_precision_score(y_true, y_score, average="macro",
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary label indicators.
+        True binary labels (either {0, 1} or {-1, 1}).
 
     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
@@ -200,7 +200,7 @@ def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
     Parameters
     ----------
     y_true : array, shape = [n_samples] or [n_samples, n_classes]
-        True binary labels in binary label indicators.
+        True binary labels (either {0, 1} or {-1, 1}).
 
     y_score : array, shape = [n_samples] or [n_samples, n_classes]
         Target scores, can either be probability estimates of the positive
@@ -438,8 +438,8 @@ def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
     ----------
 
     y_true : array, shape = [n_samples]
-        True binary labels in range {0, 1} or {-1, 1}. If labels are not
-        binary, pos_label should be explicitly given.
+        True binary labels. If labels are not either {-1, 1} or {0, 1}, then
+        pos_label should be explicitly given.
 
     y_score : array, shape = [n_samples]
         Target scores, can either be probability estimates of the positive
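
For reference, a minimal sketch of the behaviour these docstrings describe: when `y_true` uses labels other than {0, 1} or {-1, 1}, `roc_curve` cannot infer which class is positive, so `pos_label` must be passed explicitly. The values below are illustrative (this is essentially the canonical example from the scikit-learn docs).

```python
import numpy as np
from sklearn.metrics import roc_curve

# Labels drawn from {1, 2} rather than {0, 1} or {-1, 1}: without an
# explicit pos_label, roc_curve would not know that 2 is the positive class.
y_true = np.array([1, 1, 2, 2])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=2)

print(fpr)  # expected: [0.   0.   0.5  0.5  1. ]
print(tpr)  # expected: [0.   0.5  0.5  1.   1. ]
```

With labels already in {0, 1} or {-1, 1}, the same call works without `pos_label`, which is the case the updated `average_precision_score` and `roc_auc_score` docstrings now spell out.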