Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 0 additions & 5 deletions sklearn/calibration.py
Original file line number Diff line number Diff line change
Expand Up @@ -511,11 +511,6 @@ def get_metadata_routing(self):
splitter=self.cv,
method_mapping=MethodMapping().add(callee="split", caller="fit"),
)
# the fit method already accepts everything, therefore we don't
# specify parameters. The value passed to ``child`` needs to be the
# same as what's passed to ``add`` above, in this case
# `"estimator"`.
.warn_on(child="estimator", method="fit", params=None)
)
return router

Expand Down
11 changes: 1 addition & 10 deletions sklearn/metrics/_scorer.py
Original file line number Diff line number Diff line change
Expand Up @@ -402,11 +402,6 @@ def _score(self, method_caller, clf, X, y, **kwargs):
y_pred = self._select_proba_binary(y_pred, clf.classes_)

scoring_kwargs = {**self._kwargs, **kwargs}
# this is for backward compatibility to avoid passing sample_weight
# to the scorer if it's None
# TODO(1.3) Probably remove
if scoring_kwargs.get("sample_weight", -1) is None:
del scoring_kwargs["sample_weight"]

return self._sign * self._score_func(y, y_pred, **scoring_kwargs)

Expand Down Expand Up @@ -487,11 +482,7 @@ def _score(self, method_caller, clf, X, y, **kwargs):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T

scoring_kwargs = {**self._kwargs, **kwargs}
# this is for backward compatibility to avoid passing sample_weight
# to the scorer if it's None
# TODO(1.3) Probably remove
if scoring_kwargs.get("sample_weight", -1) is None:
del scoring_kwargs["sample_weight"]

return self._sign * self._score_func(y, y_pred, **scoring_kwargs)

def _factory_args(self):
Expand Down
30 changes: 8 additions & 22 deletions sklearn/multioutput.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,21 +269,11 @@ def get_metadata_routing(self):
A :class:`~utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.estimator,
method_mapping=MethodMapping()
.add(callee="partial_fit", caller="partial_fit")
.add(callee="fit", caller="fit"),
)
# the fit method already accepts everything, therefore we don't
# specify parameters. The value passed to ``child`` needs to be the
# same as what's passed to ``add`` above, in this case
# `"estimator"`.
.warn_on(child="estimator", method="fit", params=None)
# the partial_fit method at the time of this change (v1.2) only
# supports sample_weight, therefore we only include this metadata.
.warn_on(child="estimator", method="partial_fit", params=["sample_weight"])
router = MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.estimator,
method_mapping=MethodMapping()
.add(callee="partial_fit", caller="partial_fit")
.add(callee="fit", caller="fit"),
)
return router

Expand Down Expand Up @@ -1087,13 +1077,9 @@ def get_metadata_routing(self):
A :class:`~utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self.__class__.__name__)
.add(
estimator=self.base_estimator,
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
.warn_on(child="estimator", method="fit", params=None)
router = MetadataRouter(owner=self.__class__.__name__).add(
estimator=self.base_estimator,
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
return router

Expand Down
72 changes: 0 additions & 72 deletions sklearn/tests/test_metaestimators_metadata_routing.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
from sklearn.utils.metadata_routing import MetadataRouter
from sklearn.tests.test_metadata_routing import (
record_metadata,
check_recorded_metadata,
assert_request_is_empty,
)

Expand Down Expand Up @@ -159,10 +158,6 @@ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
"X": X,
"y": y_multi,
"routing_methods": ["fit", "partial_fit"],
"warns_on": {
"fit": ["sample_weight", "metadata"],
"partial_fit": ["sample_weight"],
},
},
{
"metaestimator": MultiOutputClassifier,
Expand All @@ -171,10 +166,6 @@ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
"X": X,
"y": y_multi,
"routing_methods": ["fit", "partial_fit"],
"warns_on": {
"fit": ["sample_weight", "metadata"],
"partial_fit": ["sample_weight"],
},
},
{
"metaestimator": CalibratedClassifierCV,
Expand All @@ -183,7 +174,6 @@ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
"X": X,
"y": y,
"routing_methods": ["fit"],
"warns_on": {"fit": ["sample_weight", "metadata"]},
"preserves_metadata": False,
},
{
Expand All @@ -193,7 +183,6 @@ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
"X": X,
"y": y_multi,
"routing_methods": ["fit"],
"warns_on": {},
},
{
"metaestimator": RegressorChain,
Expand All @@ -202,7 +191,6 @@ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
"X": X,
"y": y_multi,
"routing_methods": ["fit"],
"warns_on": {"fit": ["sample_weight", "metadata"]},
},
]
"""List containing all metaestimators to be tested and their settings
Expand All @@ -215,10 +203,6 @@ def predict_log_proba(self, X, sample_weight="default", metadata="default"):
- X: X-data to fit and predict
- y: y-data to fit
- routing_methods: list of all methods to check for routing
- warns_on: A dict containing all methods as keys, and arguments as values,
whose combination is supposed to result in a warning if routing is not
requested. It is implied that all routing methods and arguments not listed
here should result in an error.
- preserves_metadata: Whether the metaestimator passes the metadata to the
sub-estimator without modification or not. If it does, we check that the
values are identical. If it doesn't, no check is performed. TODO Maybe
Expand All @@ -245,56 +229,6 @@ def test_default_request(metaestimator):
assert isinstance(instance.get_metadata_routing(), MetadataRouter)


@pytest.mark.parametrize(
"metaestimator",
METAESTIMATORS,
ids=METAESTIMATOR_IDS,
)
def test_warning_for_indicated_methods(metaestimator):
# Check that the indicated methods give a warning
# TODO: Always error for 1.4
cls = metaestimator["metaestimator"]
registry = _Registry()
estimator = metaestimator["estimator"](registry=registry)
estimator_name = metaestimator["estimator_name"]
X = metaestimator["X"]
y = metaestimator["y"]
routing_methods = metaestimator["routing_methods"]
warns_on = metaestimator["warns_on"]

for method_name in routing_methods:
if method_name not in warns_on:
# this method is not expected to warn
continue

for key in warns_on[method_name]:
val = {"sample_weight": sample_weight, "metadata": metadata}[key]
kwargs = {key: val}
warn_msg = (
"You are passing metadata for which the request values are not"
f" explicitly set: {key}. From version 1.4 this results in the"
f" following error: [{key}] are passed but are not explicitly set as"
f" requested or not for {estimator.__class__.__name__}.{method_name}"
)

instance = cls(**{estimator_name: estimator})
if "fit" not in method_name: # instance needs to be fitted first
instance.fit(X, y)
with pytest.warns(FutureWarning, match=re.escape(warn_msg)):
method = getattr(instance, method_name)
method(X, y, **kwargs)

if metaestimator.get("preserves_metadata", True):
# sanity check that registry is not empty, or else the test
# passes trivially
assert registry
for estimator in registry:
check_recorded_metadata(estimator, method_name, **kwargs)
# clear the registry since the check could be different for the next
# method being tested
registry.clear()


@pytest.mark.parametrize(
"metaestimator",
METAESTIMATORS,
Expand All @@ -309,15 +243,9 @@ def test_error_for_other_methods(metaestimator):
X = metaestimator["X"]
y = metaestimator["y"]
routing_methods = metaestimator["routing_methods"]
warns_on = metaestimator["warns_on"]

for method_name in routing_methods:
warn_args = warns_on.get(method_name, [])
for key in ["sample_weight", "metadata"]:
if key in warn_args:
# this method is expected to warn for this argument, not raise
continue

val = {"sample_weight": sample_weight, "metadata": metadata}[key]
kwargs = {key: val}
msg = (
Expand Down
10 changes: 2 additions & 8 deletions sklearn/tests/test_multioutput.py
Original file line number Diff line number Diff line change
Expand Up @@ -656,15 +656,9 @@ def fit(self, X, y, **fit_params):
for est in model.estimators_:
assert est.sample_weight_ is weight

# TODO(1.4): Remove check for FutureWarning
# Test that the existing behavior works and raises a FutureWarning
# when the underlying estimator used has a sample_weight parameter
defined in its fit method.
model = RegressorChain(QuantileRegressor())
model = RegressorChain(QuantileRegressor().set_fit_request(sample_weight=True))
fit_param = {"sample_weight": weight}

with pytest.warns(FutureWarning):
model.fit(X, y, **fit_param)
model.fit(X, y, **fit_param)


@pytest.mark.parametrize(
Expand Down