|
39 | 39 | import scipy.sparse as sp |
40 | 40 |
|
41 | 41 | from .base import BaseEstimator, ClassifierMixin, clone, is_classifier |
42 | | -from .base import MetaEstimatorMixin |
| 42 | +from .base import MetaEstimatorMixin, is_regressor |
43 | 43 | from .preprocessing import LabelBinarizer |
44 | 44 | from .metrics.pairwise import euclidean_distances |
45 | 45 | from .utils import check_random_state |
@@ -77,6 +77,8 @@ def _fit_binary(estimator, X, y, classes=None): |
77 | 77 |
|
78 | 78 | def _predict_binary(estimator, X): |
79 | 79 | """Make predictions using a single binary estimator.""" |
| 80 | + if is_regressor(estimator): |
| 81 | + return estimator.predict(X) |
80 | 82 | try: |
81 | 83 | score = np.ravel(estimator.decision_function(X)) |
82 | 84 | except (AttributeError, NotImplementedError): |
@@ -276,11 +278,11 @@ def fit(self, X, y): |
276 | 278 | # In cases where individual estimators are very fast to train setting |
277 | 279 | # n_jobs > 1 can result in slower performance due to the overhead |
278 | 280 | # of spawning threads. See joblib issue #112. |
279 | | - self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary) |
280 | | - (self.estimator, X, column, |
281 | | - classes=["not %s" % self.label_binarizer_.classes_[i], |
282 | | - self.label_binarizer_.classes_[i]]) |
283 | | - for i, column in enumerate(columns)) |
| 281 | + self.estimators_ = Parallel(n_jobs=self.n_jobs)(delayed(_fit_binary)( |
| 282 | + self.estimator, X, column, classes=[ |
| 283 | + "not %s" % self.label_binarizer_.classes_[i], |
| 284 | + self.label_binarizer_.classes_[i]]) |
| 285 | + for i, column in enumerate(columns)) |
284 | 286 |
|
285 | 287 | return self |
286 | 288 |
|
|
0 commit comments