diff --git a/examples/calibration/plot_calibration_curve.py b/examples/calibration/plot_calibration_curve.py
index 5f8ad621bc7a8..82b054aea4901 100644
--- a/examples/calibration/plot_calibration_curve.py
+++ b/examples/calibration/plot_calibration_curve.py
@@ -155,11 +155,11 @@
     y_pred = clf.predict(X_test)
     scores["Classifier"].append(name)

-    for metric in [brier_score_loss, log_loss]:
+    for metric in [brier_score_loss, log_loss, roc_auc_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_prob[:, 1]))

-    for metric in [precision_score, recall_score, f1_score, roc_auc_score]:
+    for metric in [precision_score, recall_score, f1_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_pred))

@@ -300,11 +300,11 @@ def predict_proba(self, X):
     y_pred = clf.predict(X_test)
     scores["Classifier"].append(name)

-    for metric in [brier_score_loss, log_loss]:
+    for metric in [brier_score_loss, log_loss, roc_auc_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_prob[:, 1]))

-    for metric in [precision_score, recall_score, f1_score, roc_auc_score]:
+    for metric in [precision_score, recall_score, f1_score]:
         score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize()
         scores[score_name].append(metric(y_test, y_pred))
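
Both hunks make the same change: roc_auc_score is moved from the loop that scores hard predictions (y_pred) into the loop that scores predicted probabilities (y_prob[:, 1]), since ROC AUC is a ranking metric and expects continuous scores rather than thresholded labels. A minimal standalone sketch of the difference is below; it is not part of the diff, and the synthetic dataset and LogisticRegression classifier are illustrative assumptions, not taken from the example script.

# Sketch only: shows ROC AUC computed from probabilities vs. from hard labels.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

# Assumed synthetic, imbalanced binary problem for illustration.
X, y = make_classification(n_samples=2000, weights=[0.9, 0.1], random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
y_prob = clf.predict_proba(X_test)   # continuous scores, shape (n_samples, 2)
y_pred = clf.predict(X_test)         # hard 0/1 labels after thresholding at 0.5

# Probabilities preserve the ranking information ROC AUC needs;
# hard predictions collapse it to a single operating point.
print("ROC AUC from probabilities:", roc_auc_score(y_test, y_prob[:, 1]))
print("ROC AUC from hard labels:  ", roc_auc_score(y_test, y_pred))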