@@ -377,7 +377,7 @@ You can create your own custom scorer object using
>>> import numpy as np
>>> def my_custom_loss_func(y_true, y_pred):
...     diff = np.abs(y_true - y_pred).max()
- ...     return np.log1p(diff)
+ ...     return float(np.log1p(diff))
...
>>> # score will negate the return value of my_custom_loss_func,
>>> # which will be np.log(2), 0.693, given the values for X
@@ -389,9 +389,9 @@ You can create your own custom scorer object using
>>> clf = DummyClassifier(strategy='most_frequent', random_state=0)
>>> clf = clf.fit(X, y)
>>> my_custom_loss_func(y, clf.predict(X))
- np.float64(0.69...)
+ 0.69...
>>> score(clf, X, y)
- np.float64(-0.69...)
+ -0.69...
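The ``score`` callable used above is defined a few lines before this hunk; in the rendered guide it comes from :func:`make_scorer` with ``greater_is_better=False``, which is what negates the loss. A self-contained sketch of the surrounding example (the ``make_scorer`` call and the tiny ``X``, ``y`` are restated here from the guide text around the hunk, so treat them as assumed context)::

    import numpy as np
    from sklearn.dummy import DummyClassifier
    from sklearn.metrics import make_scorer

    def my_custom_loss_func(y_true, y_pred):
        diff = np.abs(y_true - y_pred).max()
        return float(np.log1p(diff))

    # greater_is_better=False makes the scorer report the negated loss
    score = make_scorer(my_custom_loss_func, greater_is_better=False)
    X, y = [[1], [1]], [0, 1]
    clf = DummyClassifier(strategy='most_frequent', random_state=0).fit(X, y)
    print(my_custom_loss_func(y, clf.predict(X)))  # ~0.693, i.e. log(2)
    print(score(clf, X, y))                        # ~-0.693, negated by the scorer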

.. dropdown:: Custom scorer objects from scratch
@@ -673,10 +673,10 @@ where :math:`k` is the number of guesses allowed and :math:`1(x)` is the
... [0.2, 0.4, 0.3],
... [0.7, 0.2, 0.1]])
>>> top_k_accuracy_score(y_true, y_score, k=2)
- np.float64(0.75)
+ 0.75
>>> # Not normalizing gives the number of "correctly" classified samples
>>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
- np.int64(3)
+ 3.0
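Both expected values can be reproduced directly from the definition: a sample counts as correct when its true class is among the ``k`` highest-scoring classes. A quick standalone check (``y_true`` and the first two rows of ``y_score`` are repeated from the lines just above this hunk)::

    import numpy as np

    y_true = np.array([0, 1, 2, 2])
    y_score = np.array([[0.5, 0.2, 0.2],
                        [0.3, 0.4, 0.2],
                        [0.2, 0.4, 0.3],
                        [0.7, 0.2, 0.1]])
    k = 2
    top_k = np.argsort(y_score, axis=1)[:, -k:]   # indices of the k largest scores per row
    hits = np.array([label in row for label, row in zip(y_true, top_k)])
    print(hits.sum(), hits.mean())                # 3 correct samples, accuracy 0.75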

.. _balanced_accuracy_score:
@@ -786,7 +786,7 @@ and not for more than two annotators.
>>> labeling1 = [2, 0, 2, 2, 0, 1]
>>> labeling2 = [0, 0, 2, 2, 0, 2]
>>> cohen_kappa_score(labeling1, labeling2)
- np.float64(0.4285714285714286)
+ 0.4285714285714286
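The expected value follows from the kappa definition, :math:`\kappa = (p_o - p_e) / (1 - p_e)`, where :math:`p_o` is the observed agreement between the two labelings and :math:`p_e` the agreement expected from each annotator's label frequencies. A quick standalone check::

    import numpy as np

    labeling1 = np.array([2, 0, 2, 2, 0, 1])
    labeling2 = np.array([0, 0, 2, 2, 0, 2])
    p_o = np.mean(labeling1 == labeling2)                     # observed agreement, 4/6
    classes = np.unique(np.concatenate([labeling1, labeling2]))
    p_e = sum(np.mean(labeling1 == c) * np.mean(labeling2 == c) for c in classes)  # 15/36
    print((p_o - p_e) / (1 - p_e))                            # 0.4285714...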

.. _confusion_matrix:
@@ -837,9 +837,9 @@ false negatives and true positives as follows::

>>> y_true = [0, 0, 0, 1, 1, 1, 1, 1]
>>> y_pred = [0, 1, 0, 1, 0, 1, 0, 1]
- >>> tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
+ >>> tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel().tolist()
>>> tn, fp, fn, tp
- (np.int64(2), np.int64(1), np.int64(2), np.int64(3))
+ (2, 1, 2, 3)
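The four counts can also be read off by pairing the two label lists, and the usual derived rates follow from them; a small standalone check::

    y_true = [0, 0, 0, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1]
    tn = sum(t == 0 and p == 0 for t, p in zip(y_true, y_pred))  # 2
    fp = sum(t == 0 and p == 1 for t, p in zip(y_true, y_pred))  # 1
    fn = sum(t == 1 and p == 0 for t, p in zip(y_true, y_pred))  # 2
    tp = sum(t == 1 and p == 1 for t, p in zip(y_true, y_pred))  # 3
    print(tn, fp, fn, tp)                  # 2 1 2 3, matching the doctest
    print(tp / (tp + fp), tp / (tp + fn))  # precision 0.75, recall 0.6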

.. rubric:: Examples
@@ -1115,7 +1115,7 @@ Here are some small examples in binary classification::
>>> threshold
array([0.1 , 0.35, 0.4 , 0.8 ])
>>> average_precision_score(y_true, y_scores)
- np.float64(0.83...)
+ 0.83...
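The 0.83... is the step-wise area under the precision-recall curve: each new true positive contributes its precision weighted by the recall increment. A standalone check (``y_true`` and ``y_scores`` are repeated from earlier in this example, so treat them as assumed context)::

    import numpy as np

    y_true = np.array([0, 0, 1, 1])
    y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    order = np.argsort(-y_scores)                      # rank predictions by decreasing score
    tp = np.cumsum(y_true[order])
    precision = tp / np.arange(1, len(y_true) + 1)
    recall = tp / y_true.sum()
    ap = np.sum(np.diff(np.concatenate([[0.0], recall])) * precision)
    print(ap)                                          # 0.8333..., i.e. 0.83...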
@@ -1234,19 +1234,19 @@ In the binary case::
>>> y_pred = np.array([[1, 1, 1],
... [1, 0, 0]])
>>> jaccard_score(y_true[0], y_pred[0])
- np.float64(0.6666...)
+ 0.6666...

In the 2D comparison case (e.g. image similarity):

>>> jaccard_score(y_true, y_pred, average="micro")
- np.float64(0.6)
+ 0.6

In the multilabel case with binary label indicators::

>>> jaccard_score(y_true, y_pred, average='samples')
- np.float64(0.5833...)
+ 0.5833...
>>> jaccard_score(y_true, y_pred, average='macro')
- np.float64(0.6666...)
+ 0.6666...
>>> jaccard_score(y_true, y_pred, average=None)
array([0.5, 0.5, 1. ])
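Jaccard is intersection over union of the positive labels, so the per-row value and the ``micro`` average above can be checked by hand (``y_true`` is repeated from the lines just above this hunk, so treat it as assumed context)::

    import numpy as np

    y_true = np.array([[0, 1, 1],
                       [1, 1, 0]])
    y_pred = np.array([[1, 1, 1],
                       [1, 0, 0]])
    # first row: |intersection| / |union| = 2 / 3
    print(np.logical_and(y_true[0], y_pred[0]).sum() / np.logical_or(y_true[0], y_pred[0]).sum())
    # "micro": pool every entry before dividing = 3 / 5
    print(np.logical_and(y_true, y_pred).sum() / np.logical_or(y_true, y_pred).sum())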
@@ -1258,9 +1258,9 @@ multilabel problem::
>>> jaccard_score(y_true, y_pred, average=None)
array([1. , 0. , 0.33...])
>>> jaccard_score(y_true, y_pred, average='macro')
- np.float64(0.44...)
+ 0.44...
>>> jaccard_score(y_true, y_pred, average='micro')
- np.float64(0.33...)
+ 0.33...

.. _hinge_loss:
@@ -1315,7 +1315,7 @@ with a svm classifier in a binary class problem::
>>> pred_decision
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision)
- np.float64(0.3...)
+ 0.3...
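For the binary case the hinge loss is simply ``mean(max(0, 1 - y * decision))`` with ``y`` in {-1, +1}; plugging in the (rounded) decision values shown above reproduces the 0.3...::

    import numpy as np

    y = np.array([-1, 1, 1])
    pred_decision = np.array([-2.18, 2.36, 0.09])          # approximate values from the doctest
    print(np.mean(np.maximum(0, 1 - y * pred_decision)))   # ~0.303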

Here is an example demonstrating the use of the :func:`hinge_loss` function
with a svm classifier in a multiclass problem::
@@ -1329,7 +1329,7 @@ with a svm classifier in a multiclass problem::
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels=labels)
- np.float64(0.56...)
+ 0.56...

.. _log_loss:
@@ -1445,7 +1445,7 @@ function:
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred)
- np.float64(-0.33...)
+ -0.33...
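The -0.33... follows from the MCC definition :math:`(tp \times tn - fp \times fn) / \sqrt{(tp+fp)(tp+fn)(tn+fp)(tn+fn)}`; with these labels ``tp=2, tn=0, fp=1, fn=1``::

    import math

    tp, tn, fp, fn = 2, 0, 1, 1
    mcc = (tp * tn - fp * fn) / math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    print(mcc)   # -1/3 = -0.333...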

.. rubric:: References
@@ -1640,12 +1640,12 @@ We can use the probability estimates corresponding to `clf.classes_[1]`.

>>> y_score = clf.predict_proba(X)[:, 1]
>>> roc_auc_score(y, y_score)
- np.float64(0.99...)
+ 0.99...

Otherwise, we can use the non-thresholded decision values

>>> roc_auc_score(y, clf.decision_function(X))
- np.float64(0.99...)
+ 0.99...
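The dataset and classifier used here are defined above this hunk and are not shown in the diff; a self-contained sketch of the same pattern, with an illustrative ``make_classification`` / ``LogisticRegression`` setup standing in for the guide's own estimator::

    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import roc_auc_score

    X, y = make_classification(random_state=0)
    clf = LogisticRegression(random_state=0).fit(X, y)
    # probability estimates for the class labelled clf.classes_[1] ...
    print(roc_auc_score(y, clf.predict_proba(X)[:, 1]))
    # ... or the raw decision values: AUC only depends on the ranking, so both agree
    print(roc_auc_score(y, clf.decision_function(X)))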

.. _roc_auc_multiclass:
@@ -1951,13 +1951,13 @@ Here is a small example of usage of this function::
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.4])
>>> y_pred = np.array([0, 1, 1, 0])
>>> brier_score_loss(y_true, y_prob)
- np.float64(0.055)
+ 0.055
>>> brier_score_loss(y_true, 1 - y_prob, pos_label=0)
- np.float64(0.055)
+ 0.055
>>> brier_score_loss(y_true_categorical, y_prob, pos_label="ham")
- np.float64(0.055)
+ 0.055
>>> brier_score_loss(y_true, y_prob > 0.5)
- np.float64(0.0)
+ 0.0
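All three 0.055 values are the same quantity: the mean squared gap between the predicted probability of the positive class and the 0/1 outcome. A quick check (``y_true`` is repeated from the lines just above this hunk)::

    import numpy as np

    y_true = np.array([0, 1, 1, 0])
    y_prob = np.array([0.1, 0.9, 0.8, 0.4])
    print(np.mean((y_prob - y_true) ** 2))   # (0.01 + 0.01 + 0.04 + 0.16) / 4 = 0.055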

The Brier score can be used to assess how well a classifier is calibrated.
However, a lower Brier score loss does not always mean a better calibration.
@@ -2236,7 +2236,7 @@ Here is a small example of usage of this function::
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> coverage_error(y_true, y_score)
- np.float64(2.5)
+ 2.5
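Coverage error is the average number of top-scored labels that must be kept to cover every true label: 2 for the first sample, 3 for the second, hence 2.5. A standalone check::

    import numpy as np

    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    depths = []
    for truth, scores in zip(y_true, y_score):
        worst_true_score = scores[truth == 1].min()        # score of the worst-ranked true label
        depths.append(int((scores >= worst_true_score).sum()))
    print(depths, np.mean(depths))                         # [2, 3] -> 2.5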

.. _label_ranking_average_precision:
@@ -2283,7 +2283,7 @@ Here is a small example of usage of this function::
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score)
- np.float64(0.416...)
+ 0.416...
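The 0.416... averages the precision obtained at each true label's rank: 1/2 for the first sample and 1/3 for the second, i.e. 5/12. Each sample here has a single true label, so averaging over labels and over samples coincide; a standalone check::

    import numpy as np

    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    per_label = []
    for truth, scores in zip(y_true, y_score):
        for j in np.flatnonzero(truth):
            at_or_above = scores >= scores[j]              # labels ranked at or above label j
            per_label.append(truth[at_or_above].sum() / at_or_above.sum())
    print(np.mean(per_label))                              # 5/12 = 0.41666...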

.. _label_ranking_loss:
@@ -2318,11 +2318,11 @@ Here is a small example of usage of this function::
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_loss(y_true, y_score)
- np.float64(0.75...)
+ 0.75...
>>> # With the following prediction, we have perfect and minimal loss
>>> y_score = np.array([[1.0, 0.1, 0.2], [0.1, 0.2, 0.9]])
>>> label_ranking_loss(y_true, y_score)
- np.float64(0.0)
+ 0.0
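The 0.75 is the average fraction of (true label, false label) pairs that the scores order the wrong way round: 1 of 2 pairs for the first sample, 2 of 2 for the second; with the corrected scores no pair is mis-ordered, hence 0.0. A standalone check of the first value::

    import numpy as np

    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    fractions = []
    for truth, scores in zip(y_true, y_score):
        true_s, false_s = scores[truth == 1], scores[truth == 0]
        pairs = [(t, f) for t in true_s for f in false_s]
        fractions.append(sum(t <= f for t, f in pairs) / len(pairs))   # mis-ordered pairs
    print(np.mean(fractions))                                          # (0.5 + 1.0) / 2 = 0.75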

.. dropdown:: References
@@ -2700,7 +2700,7 @@ function::
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
- np.float64(0.5)
+ 0.5
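The absolute residuals here are ``[0.5, 0.5, 0.0, 1.0]``, whose median is 0.5::

    import numpy as np

    print(np.median(np.abs(np.array([3, -0.5, 2, 7]) - np.array([2.5, 0.0, 2, 8]))))   # 0.5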
@@ -2732,7 +2732,7 @@ Here is a small example of usage of the :func:`max_error` function::
>>> y_true = [3, 2, 7, 1]
>>> y_pred = [9, 2, 7, 1]
>>> max_error(y_true, y_pred)
- np.int64(6)
+ 6.0

The :func:`max_error` does not support multioutput.
@@ -3011,15 +3011,15 @@ of 0.0.
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> d2_absolute_error_score(y_true, y_pred)
- np.float64(0.764...)
+ 0.764...
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 2, 3]
>>> d2_absolute_error_score(y_true, y_pred)
- np.float64(1.0)
+ 1.0
>>> y_true = [1, 2, 3]
>>> y_pred = [2, 2, 2]
>>> d2_absolute_error_score(y_true, y_pred)
- np.float64(0.0)
+ 0.0
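These values follow from the definition of :math:`D^2` with absolute error: one minus the model's mean absolute error divided by the mean absolute error of always predicting the median of ``y_true``. For the first pair the model's MAE is 0.5 against a median-baseline MAE of 2.125, giving 0.7647...; a perfect prediction gives 1.0, and predicting the median of ``[1, 2, 3]`` everywhere gives 0.0. A quick check of the first value::

    import numpy as np

    y_true = np.array([3, -0.5, 2, 7])
    y_pred = np.array([2.5, 0.0, 2, 8])
    mae_model = np.mean(np.abs(y_true - y_pred))                   # 0.5
    mae_baseline = np.mean(np.abs(y_true - np.median(y_true)))     # 2.125
    print(1 - mae_model / mae_baseline)                            # 0.7647...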

.. _visualization_regression_evaluation: