diff --git a/doc/modules/ensemble.rst b/doc/modules/ensemble.rst
index 6047cd5bc7511..2d1f05f230ed4 100644
--- a/doc/modules/ensemble.rst
+++ b/doc/modules/ensemble.rst
@@ -740,8 +740,10 @@ of ``learning_rate`` require larger numbers of weak learners to maintain
 a constant training error. Empirical evidence suggests that small
 values of ``learning_rate`` favor better test error. [HTF]_
 recommend to set the learning rate to a small constant
-(e.g. ``learning_rate <= 0.1``) and choose ``n_estimators`` by early
-stopping. For a more detailed discussion of the interaction between
+(e.g. ``learning_rate <= 0.1``) and choose ``n_estimators`` large enough
+that early stopping applies; see
+:ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
+For a more detailed discussion of the interaction between
 ``learning_rate`` and ``n_estimators`` see [R2007]_.
 
 Subsampling
diff --git a/examples/ensemble/plot_gradient_boosting_early_stopping.py b/examples/ensemble/plot_gradient_boosting_early_stopping.py
index f271f80a07c55..e8514fe2aff87 100644
--- a/examples/ensemble/plot_gradient_boosting_early_stopping.py
+++ b/examples/ensemble/plot_gradient_boosting_early_stopping.py
@@ -1,6 +1,6 @@
 """
 ===================================
-Early stopping of Gradient Boosting
+Early stopping in Gradient Boosting
 ===================================
 
 Gradient boosting is an ensembling technique where several weak learners
diff --git a/sklearn/ensemble/_gb.py b/sklearn/ensemble/_gb.py
index e198babdb28d7..e3d4515e1694d 100644
--- a/sklearn/ensemble/_gb.py
+++ b/sklearn/ensemble/_gb.py
@@ -1282,6 +1282,8 @@ class GradientBoostingClassifier(ClassifierMixin, BaseGradientBoosting):
         improving in all of the previous ``n_iter_no_change`` numbers of
         iterations. The split is stratified.
         Values must be in the range `[1, inf)`.
+        See
+        :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
 
         .. versionadded:: 0.20
 
@@ -1891,6 +1893,8 @@ class GradientBoostingRegressor(RegressorMixin, BaseGradientBoosting):
         improving in all of the previous ``n_iter_no_change`` numbers of
         iterations.
         Values must be in the range `[1, inf)`.
+        See
+        :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`.
 
         .. versionadded:: 0.20
 
diff --git a/sklearn/linear_model/_passive_aggressive.py b/sklearn/linear_model/_passive_aggressive.py
index d27cc928ca056..68237ade18bb5 100644
--- a/sklearn/linear_model/_passive_aggressive.py
+++ b/sklearn/linear_model/_passive_aggressive.py
@@ -35,11 +35,11 @@ class PassiveAggressiveClassifier(BaseSGDClassifier):
         .. versionadded:: 0.19
 
     early_stopping : bool, default=False
-        Whether to use early stopping to terminate training when validation.
+        Whether to use early stopping to terminate training when validation
         score is not improving. If set to True, it will automatically set
         aside a stratified fraction of training data as validation and terminate
-        training when validation score is not improving by at least tol for
-        n_iter_no_change consecutive epochs.
+        training when validation score is not improving by at least `tol` for
+        `n_iter_no_change` consecutive epochs.
 
         .. versionadded:: 0.20
 
diff --git a/sklearn/linear_model/_perceptron.py b/sklearn/linear_model/_perceptron.py
index 30e781983365e..eaf3da556b24a 100644
--- a/sklearn/linear_model/_perceptron.py
+++ b/sklearn/linear_model/_perceptron.py
@@ -68,11 +68,11 @@ class Perceptron(BaseSGDClassifier):
         See :term:`Glossary <random_state>`.
 
     early_stopping : bool, default=False
-        Whether to use early stopping to terminate training when validation.
+        Whether to use early stopping to terminate training when validation
         score is not improving. If set to True, it will automatically set
         aside a stratified fraction of training data as validation and terminate
-        training when validation score is not improving by at least tol for
-        n_iter_no_change consecutive epochs.
+        training when validation score is not improving by at least `tol` for
+        `n_iter_no_change` consecutive epochs.
 
         .. versionadded:: 0.20
 
diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py
index d64593c27d6f5..02303006dd91c 100644
--- a/sklearn/neural_network/_multilayer_perceptron.py
+++ b/sklearn/neural_network/_multilayer_perceptron.py
@@ -887,7 +887,7 @@ class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
         Whether to use early stopping to terminate training when validation
         score is not improving. If set to true, it will automatically set
         aside 10% of training data as validation and terminate training when
-        validation score is not improving by at least tol for
+        validation score is not improving by at least ``tol`` for
         ``n_iter_no_change`` consecutive epochs. The split is stratified,
         except in a multilabel setting.
         If early stopping is False, then the training stops when the training
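
Below the patch, a minimal usage sketch (not part of the diff) of the gradient-boosting
early stopping that the linked example documents: setting ``n_iter_no_change`` makes the
estimator hold out ``validation_fraction`` of the training data and stop adding stages
once the validation score stops improving by at least ``tol``. The dataset and parameter
values are illustrative assumptions::

    # Illustrative only: shows the n_iter_no_change / validation_fraction / tol
    # parameters referenced in the docstrings above; the values are arbitrary.
    from sklearn.datasets import make_classification
    from sklearn.ensemble import GradientBoostingClassifier

    X, y = make_classification(n_samples=1000, random_state=0)

    gbc = GradientBoostingClassifier(
        n_estimators=1000,        # upper bound; early stopping usually ends sooner
        learning_rate=0.1,        # small constant, as recommended in the docs
        validation_fraction=0.1,  # stratified hold-out used to monitor the score
        n_iter_no_change=10,      # stop after 10 non-improving iterations
        tol=1e-4,
        random_state=0,
    )
    gbc.fit(X, y)

    # n_estimators_ reports how many boosting stages were actually fitted.
    print(gbc.n_estimators_)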
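
Likewise, a hedged sketch of the boolean ``early_stopping`` flag whose description the
patch cleans up in the linear models and the MLP; the choice of
``PassiveAggressiveClassifier`` and the settings below are assumptions for illustration::

    # Illustrative only: early_stopping=True sets aside a stratified validation
    # split and stops once the score fails to improve by tol for
    # n_iter_no_change consecutive epochs.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import PassiveAggressiveClassifier

    X, y = make_classification(n_samples=1000, random_state=0)

    clf = PassiveAggressiveClassifier(
        early_stopping=True,
        validation_fraction=0.1,  # fraction of training data held out for scoring
        n_iter_no_change=5,
        tol=1e-3,
        max_iter=1000,
        random_state=0,
    )
    clf.fit(X, y)

    # n_iter_ is the number of epochs actually run before stopping.
    print(clf.n_iter_)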