BUG Fixes sample weights when there are missing values in DecisionTrees #26376


Merged
doc/whats_new/v1.3.rst (2 changes: 1 addition & 1 deletion)
@@ -516,7 +516,7 @@ Changelog
:class:`tree.DecisionTreeClassifier` support missing values when
`splitter='best'` and criterion is `gini`, `entropy`, or `log_loss`,
for classification or `squared_error`, `friedman_mse`, or `poisson`
-  for regression. :pr:`23595` by `Thomas Fan`_.
+  for regression. :pr:`23595`, :pr:`26376` by `Thomas Fan`_.

- |Enhancement| Adds a `class_names` parameter to
:func:`tree.export_text`. This allows specifying the parameter `class_names`
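As context for the missing-value changelog entry above, a minimal sketch of the behavior it refers to: with the default `splitter='best'` and a supported criterion, a tree can be fit directly on data containing `np.nan` (assumes scikit-learn 1.3 or later; the toy data below is illustrative only).

import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Toy data with missing entries encoded as np.nan.
X = np.array([[0.0, 1.0], [np.nan, 2.0], [3.0, 3.0], [4.0, np.nan]])
y = np.array([0, 0, 1, 1])

# splitter='best' and criterion='gini' are the defaults, so the NaNs are
# handled natively instead of raising an error about non-finite input.
clf = DecisionTreeClassifier(random_state=0).fit(X, y)
print(clf.predict(X))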
sklearn/tree/_criterion.pyx (8 changes: 6 additions & 2 deletions)
@@ -838,7 +838,9 @@ cdef class RegressionCriterion(Criterion):
self.sample_indices[-n_missing:]
"""
cdef SIZE_t i, p, k
-        cdef DOUBLE_t w = 0.0
+        cdef DOUBLE_t y_ik
+        cdef DOUBLE_t w_y_ik
+        cdef DOUBLE_t w = 1.0

self.n_missing = n_missing
if n_missing == 0:
@@ -855,7 +857,9 @@
w = self.sample_weight[i]

for k in range(self.n_outputs):
-                self.sum_missing[k] += w
+                y_ik = self.y[i, k]
+                w_y_ik = w * y_ik
+                self.sum_missing[k] += w_y_ik

self.weighted_n_missing += w

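To spell out the fix above: the old code accumulated the raw sample weight `w` into `sum_missing[k]` for every output and initialized `w` to 0.0, so the statistics for samples with a missing split feature did not reflect their target values. The corrected code sums `w * y[i, k]` per output, consistent with what the regression criterion tracks for non-missing samples, and defaults `w` to 1.0 when no sample weights are given. A plain-NumPy sketch of the corrected accumulation (the function name and signature here are illustrative, not the actual Cython API):

import numpy as np

def init_missing_sums(y, sample_weight, missing_indices, n_outputs):
    # Sum the *weighted target values* of the samples whose split feature is
    # missing, plus their total weight.
    sum_missing = np.zeros(n_outputs)
    weighted_n_missing = 0.0
    for i in missing_indices:
        w = 1.0 if sample_weight is None else sample_weight[i]  # default weight is 1.0, not 0.0
        for k in range(n_outputs):
            sum_missing[k] += w * y[i, k]  # previously: sum_missing[k] += w (the bug)
        weighted_n_missing += w
    return sum_missing, weighted_n_missing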
sklearn/tree/tests/test_tree.py (39 changes: 36 additions & 3 deletions)
@@ -2549,7 +2549,8 @@ def test_missing_values_poisson():
(datasets.make_classification, DecisionTreeClassifier),
],
)
-def test_missing_values_is_resilience(make_data, Tree):
+@pytest.mark.parametrize("sample_weight_train", [None, "ones"])
Review comment (Member): Should we also test the behavior when using non-uniform weights?

Review comment (Member): Probably, an easier test than non-uniform weights would be to assign a zero weight to some specific samples.

Review comment (Member): It might be worth having a separate test to check the equivalence.

+def test_missing_values_is_resilience(make_data, Tree, sample_weight_train):
"""Check that trees can deal with missing values and have decent performance."""

rng = np.random.RandomState(0)
@@ -2563,15 +2564,18 @@ def test_missing_values_is_resilience(make_data, Tree):
X_missing, y, random_state=0
)

+    if sample_weight_train == "ones":
+        sample_weight_train = np.ones(X_missing_train.shape[0])
+
# Train tree with missing values
tree_with_missing = Tree(random_state=rng)
-    tree_with_missing.fit(X_missing_train, y_train)
+    tree_with_missing.fit(X_missing_train, y_train, sample_weight=sample_weight_train)
score_with_missing = tree_with_missing.score(X_missing_test, y_test)

# Train tree without missing values
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
tree = Tree(random_state=rng)
-    tree.fit(X_train, y_train)
+    tree.fit(X_train, y_train, sample_weight=sample_weight_train)
score_without_missing = tree.score(X_test, y_test)

# Score is still 90 percent of the tree's score that had no missing values
@@ -2601,3 +2605,32 @@ def test_missing_value_is_predictive():

assert tree.score(X_train, y_train) >= 0.85
assert tree.score(X_test, y_test) >= 0.85


+@pytest.mark.parametrize(
+    "make_data, Tree",
+    [
+        (datasets.make_regression, DecisionTreeRegressor),
+        (datasets.make_classification, DecisionTreeClassifier),
+    ],
+)
+def test_sample_weight_non_uniform(make_data, Tree):
+    """Check sample weight is correctly handled with missing values."""
+    rng = np.random.RandomState(0)
+    n_samples, n_features = 1000, 10
+    X, y = make_data(n_samples=n_samples, n_features=n_features, random_state=rng)
+
+    # Create dataset with missing values
+    X[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan
Review comment (Member): neat idiom :)

+    # Zero sample weight is the same as removing the sample
+    sample_weight = np.ones(X.shape[0])
+    sample_weight[::2] = 0.0
+
+    tree_with_sw = Tree(random_state=0)
+    tree_with_sw.fit(X, y, sample_weight=sample_weight)
+
+    tree_samples_removed = Tree(random_state=0)
+    tree_samples_removed.fit(X[1::2, :], y[1::2])
+
+    assert_allclose(tree_samples_removed.predict(X), tree_with_sw.predict(X))
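On the reviewer's question about genuinely non-uniform weights: the merged test covers the zero-weight case, where dropping a sample and giving it weight 0 must be equivalent. Another check in the same spirit, not part of this PR, is the repetition equivalence: an integer weight of 2 should behave like including that sample twice. A hypothetical sketch (assumes scikit-learn 1.3+ for the NaN support; exact equality can be broken by ties between equally good splits, so this is only an illustration):

import numpy as np
from numpy.testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X, y = make_regression(n_samples=200, n_features=5, random_state=0)
X[rng.choice([False, True], size=X.shape, p=[0.9, 0.1])] = np.nan  # ~10% missing entries

# Weight 2 on every third sample...
sample_weight = np.ones(X.shape[0])
sample_weight[::3] = 2.0

# ...should behave like repeating those samples once more in the training set.
X_rep = np.concatenate([X, X[::3]])
y_rep = np.concatenate([y, y[::3]])

tree_weighted = DecisionTreeRegressor(random_state=0).fit(X, y, sample_weight=sample_weight)
tree_repeated = DecisionTreeRegressor(random_state=0).fit(X_rep, y_rep)

assert_allclose(tree_weighted.predict(X), tree_repeated.predict(X))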