Skip to content

[WIP] MNT enforce column names consistency #17407

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 21 additions & 1 deletion sklearn/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from .utils import _IS_32BIT
from .utils.validation import check_X_y
from .utils.validation import check_array
from .utils.validation import _is_dataframe
from .utils._estimator_html_repr import estimator_html_repr
from .utils.validation import _deprecate_positional_args

Expand Down Expand Up @@ -376,6 +377,21 @@ def _check_n_features(self, X, reset):
self.n_features_in_)
)

def _check_feature_names(self, df, reset):
# set _feature_names_in attribute or check against it

if reset:
self._feature_names_in = df.columns.values
elif hasattr(self, '_feature_names_in'):
feature_names = df.columns.values
if np.any(feature_names != self._feature_names_in):
raise ValueError(
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think for backward compatibility we need to warn first, right?

"The column names of the dataframe must match those "
"that were passed during fit(), in the same order. "
f"Got ({feature_names}), expected "
f"({self._feature_names_in})."
)

def _validate_data(self, X, y=None, reset=True,
validate_separately=False, **check_params):
"""Validate input data and set or check the `n_features_in_` attribute.
Expand Down Expand Up @@ -406,9 +422,11 @@ def _validate_data(self, X, y=None, reset=True,
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if `y` is not None.
"""
is_df = _is_dataframe(X)
X_orig = X

if y is None:
if self._get_tags()['requires_y']:
if reset and self._get_tags()['requires_y']:
raise ValueError(
f"This {self.__class__.__name__} estimator "
f"requires y to be passed, but the target y is None."
Expand All @@ -430,6 +448,8 @@ def _validate_data(self, X, y=None, reset=True,

if check_params.get('ensure_2d', True):
self._check_n_features(X, reset=reset)
if is_df:
self._check_feature_names(X_orig, reset=reset)

return out

Expand Down
4 changes: 2 additions & 2 deletions sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py
Original file line number Diff line number Diff line change
Expand Up @@ -640,9 +640,9 @@ def _raw_predict(self, X):
raw_predictions : array, shape (n_trees_per_iteration, n_samples)
The raw predicted values.
"""
X = check_array(X, dtype=[X_DTYPE, X_BINNED_DTYPE],
force_all_finite=False)
check_is_fitted(self)
X = self._validate_data(X, dtype=[X_DTYPE, X_BINNED_DTYPE],
force_all_finite=False, reset=False)
if X.shape[1] != self.n_features_:
raise ValueError(
'X has {} features but this estimator was trained with '
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -794,3 +794,10 @@ def test_staged_predict(HistGradientBoosting, X, y):

assert_allclose(staged_predictions, pred_aux)
assert staged_predictions.shape == pred_aux.shape


def test_df_column_names():
    # Run the common column-name consistency check against the
    # histogram-based gradient boosting regressor.
    from sklearn.utils.estimator_checks import \
        check_dataframe_column_names_consistency as check_names  # noqa

    estimator = HistGradientBoostingRegressor()
    check_names('hgbdt', estimator)
42 changes: 42 additions & 0 deletions sklearn/utils/estimator_checks.py
Original file line number Diff line number Diff line change
Expand Up @@ -2991,3 +2991,45 @@ def check_requires_y_none(name, estimator_orig):
except ValueError as ve:
if not any(msg in str(ve) for msg in expected_err_msgs):
warnings.warn(warning_msg, FutureWarning)


def check_dataframe_column_names_consistency(name, estimator_orig):
    """Check that an estimator validates dataframe column names.

    Fits a clone of `estimator_orig` on a small random DataFrame, then
    verifies that each prediction-like method accepts the original
    column order and raises a ValueError when the columns are permuted.

    Parameters
    ----------
    name : str
        Estimator name, as used by the common-check machinery.
    estimator_orig : estimator instance
        The estimator to check. It is cloned, never modified in place.
    """
    try:
        import pandas as pd
    except ImportError:
        raise SkipTest(
            "pandas is not installed: not checking column names consistency"
        )

    rng = np.random.RandomState(0)

    estimator = clone(estimator_orig)
    set_random_state(estimator)
    if 'warm_start' in estimator.get_params():
        estimator.set_params(warm_start=False)

    n_samples = 100
    X = rng.normal(loc=100, size=(n_samples, 2))
    X = _pairwise_estimator_convert_X(X, estimator)
    X = pd.DataFrame(X)

    if is_regressor(estimator_orig):
        y = rng.normal(size=n_samples)
    else:
        y = rng.randint(low=0, high=2, size=n_samples)
    y = _enforce_estimator_tags_y(estimator, y)

    estimator.fit(X, y)
    if hasattr(estimator, '_feature_names_in'):
        assert_array_equal(estimator._feature_names_in, X.columns.values)

    bad_X = X[X.columns[::-1]]  # reverse column order

    for method in ["predict", "transform", "decision_function",
                   "predict_proba"]:
        if hasattr(estimator, method):
            func = getattr(estimator, method)
            # Each method under test must succeed with the original
            # column order (previously this always called `predict`,
            # so transform/decision_function/predict_proba were never
            # exercised with the good input)...
            func(X)
            # ...and must reject a permuted column order.
            msg = ("column names of the dataframe must match those that were "
                   "passed during fit")
            assert_raises_regex(ValueError, msg, func, bad_X)
13 changes: 13 additions & 0 deletions sklearn/utils/tests/test_validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
_deprecate_positional_args,
_check_sample_weight,
_allclose_dense_sparse,
_is_dataframe,
FLOAT_DTYPES)
from sklearn.utils.validation import _check_fit_params

Expand Down Expand Up @@ -1213,3 +1214,15 @@ def test_check_sparse_pandas_sp_format(sp_format):
assert sp.issparse(result)
assert result.format == sp_format
assert_allclose_dense_sparse(sp_mat, result)


def test_is_dataframe():
    pd = pytest.importorskip('pandas')

    # pandas containers expose `.iloc` and must be recognized.
    for df_like in (pd.DataFrame(np.arange(10)), pd.Series(np.arange(10))):
        assert _is_dataframe(df_like)

    # Anything without `.iloc` must not be treated as dataframe-like.
    for not_df in (np.arange(10), list(range(10)), 1234, 'still not a df'):
        assert not _is_dataframe(not_df)
5 changes: 5 additions & 0 deletions sklearn/utils/validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -1378,3 +1378,8 @@ def _check_fit_params(X, fit_params, indices=None):
)

return fit_params_validated


def _is_dataframe(X):
# Return True if X is a pandas dataframe (or a Series)
return hasattr(X, 'iloc')
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is how we've done it in other places as well, but I vaguely remember some upcoming API changes on the pandas side which would affect this. How would you test for a DataFrame @TomAugspurger ?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm I'm not sure. There aren't any plans to remove DataFrame.iloc or Series.iloc.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a reason why you don't just test for type(X) == type(pd.DataFrame()) or type(X) == type(pd.Series())?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

that would require having pandas as a dependency and we don't want that

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can do a similarly direct test without adding a dependency on pandas... But generally duck typing is recommended in Python to allow for new players with compatible APIs?