Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion sklearn/linear_model/_glm/_newton_solver.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@

from ..._loss.loss import HalfSquaredError
from ...exceptions import ConvergenceWarning
from ...utils.fixes import _get_additional_lbfgs_options_dict
from ...utils.optimize import _check_optimize_result
from .._linear_loss import LinearModelLoss

Expand Down Expand Up @@ -187,9 +188,9 @@ def fallback_lbfgs_solve(self, X, y, sample_weight):
options={
"maxiter": max_iter,
"maxls": 50, # default is 20
"iprint": self.verbose - 1,
"gtol": self.tol,
"ftol": 64 * np.finfo(np.float64).eps,
**_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
},
args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
)
Expand Down
3 changes: 2 additions & 1 deletion sklearn/linear_model/_glm/glm.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
from ...utils import check_array
from ...utils._openmp_helpers import _openmp_effective_n_threads
from ...utils._param_validation import Hidden, Interval, StrOptions
from ...utils.fixes import _get_additional_lbfgs_options_dict
from ...utils.optimize import _check_optimize_result
from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data
from .._linear_loss import LinearModelLoss
Expand Down Expand Up @@ -273,12 +274,12 @@ def fit(self, X, y, sample_weight=None):
options={
"maxiter": self.max_iter,
"maxls": 50, # default is 20
"iprint": self.verbose - 1,
"gtol": self.tol,
# The constant 64 was found empirically to pass the test suite.
# The point is that ftol is very small, but a bit larger than
# machine precision for float64, which is the dtype used by lbfgs.
"ftol": 64 * np.finfo(float).eps,
**_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
},
args=(X, y, sample_weight, l2_reg_strength, n_threads),
)
Expand Down
7 changes: 6 additions & 1 deletion sklearn/linear_model/_huber.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from ..utils._mask import axis0_safe_slice
from ..utils._param_validation import Interval
from ..utils.extmath import safe_sparse_dot
from ..utils.fixes import _get_additional_lbfgs_options_dict
from ..utils.optimize import _check_optimize_result
from ..utils.validation import _check_sample_weight, validate_data
from ._base import LinearModel
Expand Down Expand Up @@ -329,7 +330,11 @@ def fit(self, X, y, sample_weight=None):
method="L-BFGS-B",
jac=True,
args=(X, y, self.epsilon, self.alpha, sample_weight),
options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
options={
"maxiter": self.max_iter,
"gtol": self.tol,
**_get_additional_lbfgs_options_dict("iprint", -1),
},
bounds=bounds,
)

Expand Down
3 changes: 2 additions & 1 deletion sklearn/linear_model/_logistic.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
)
from ..utils._param_validation import Hidden, Interval, StrOptions
from ..utils.extmath import row_norms, softmax
from ..utils.fixes import _get_additional_lbfgs_options_dict
from ..utils.metadata_routing import (
MetadataRouter,
MethodMapping,
Expand Down Expand Up @@ -464,9 +465,9 @@ def _logistic_regression_path(
options={
"maxiter": max_iter,
"maxls": 50, # default is 20
"iprint": iprint,
"gtol": tol,
"ftol": 64 * np.finfo(float).eps,
**_get_additional_lbfgs_options_dict("iprint", iprint),
},
)
n_iter_i = _check_optimize_result(
Expand Down
6 changes: 5 additions & 1 deletion sklearn/neighbors/_nca.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
from ..preprocessing import LabelEncoder
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import softmax
from ..utils.fixes import _get_additional_lbfgs_options_dict
from ..utils.multiclass import check_classification_targets
from ..utils.random import check_random_state
from ..utils.validation import check_array, check_is_fitted, validate_data
Expand Down Expand Up @@ -312,7 +313,10 @@ def fit(self, X, y):
"jac": True,
"x0": transformation,
"tol": self.tol,
"options": dict(maxiter=self.max_iter, disp=disp),
"options": dict(
maxiter=self.max_iter,
**_get_additional_lbfgs_options_dict("disp", disp),
),
"callback": self._callback,
}

Expand Down
3 changes: 2 additions & 1 deletion sklearn/neural_network/_multilayer_perceptron.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
)
from ..utils._param_validation import Interval, Options, StrOptions
from ..utils.extmath import safe_sparse_dot
from ..utils.fixes import _get_additional_lbfgs_options_dict
from ..utils.metaestimators import available_if
from ..utils.multiclass import (
_check_partial_fit_first_call,
Expand Down Expand Up @@ -585,8 +586,8 @@ def _fit_lbfgs(
options={
"maxfun": self.max_fun,
"maxiter": self.max_iter,
"iprint": iprint,
"gtol": self.tol,
**_get_additional_lbfgs_options_dict("iprint", iprint),
},
args=(
X,
Expand Down
10 changes: 10 additions & 0 deletions sklearn/utils/fixes.py
Original file line number Diff line number Diff line change
Expand Up @@ -394,6 +394,16 @@ def _in_unstable_openblas_configuration():
return False


# TODO: Remove when Scipy 1.15 is the minimum supported version. Starting
# with scipy 1.15, the verbose-output options were dropped from L-BFGS-B
# after its rewrite from Fortran to C, see
# https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035.
# On scipy 1.15 'iprint' and 'disp' are silently ignored; on scipy >= 1.16
# passing them emits a DeprecationWarning.
def _get_additional_lbfgs_options_dict(key, value):
    # Only forward the legacy option on scipy versions that still accept it.
    if sp_version >= parse_version("1.15"):
        return {}
    return {key: value}


# TODO(pyarrow): Remove when minimum pyarrow version is 17.0.0
PYARROW_VERSION_BELOW_17 = False
try:
Expand Down