From 37edfee05f42a46f04573600a98f45c9902d5848 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Tue, 24 Jun 2025 10:43:54 +0200
Subject: [PATCH 1/7] Remove deprecated iprint usage in scipy 1.15 LBFGS

---
 sklearn/linear_model/_glm/_newton_solver.py      |  3 ++-
 sklearn/linear_model/_glm/glm.py                 |  3 ++-
 sklearn/linear_model/_huber.py                   |  7 ++++++-
 sklearn/linear_model/_logistic.py                |  3 ++-
 sklearn/neural_network/_multilayer_perceptron.py |  3 ++-
 sklearn/utils/fixes.py                           | 10 ++++++++++
 6 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/sklearn/linear_model/_glm/_newton_solver.py b/sklearn/linear_model/_glm/_newton_solver.py
index c5c940bed6c39..7d89c4a5d76e1 100644
--- a/sklearn/linear_model/_glm/_newton_solver.py
+++ b/sklearn/linear_model/_glm/_newton_solver.py
@@ -14,6 +14,7 @@
 
 from ..._loss.loss import HalfSquaredError
 from ...exceptions import ConvergenceWarning
+from ...utils.fixes import _get_lbfgs_iprint_options_dict
 from ...utils.optimize import _check_optimize_result
 from .._linear_loss import LinearModelLoss
 
@@ -187,9 +188,9 @@ def fallback_lbfgs_solve(self, X, y, sample_weight):
             options={
                 "maxiter": max_iter,
                 "maxls": 50,  # default is 20
-                "iprint": self.verbose - 1,
                 "gtol": self.tol,
                 "ftol": 64 * np.finfo(np.float64).eps,
+                **_get_lbfgs_iprint_options_dict(self.verbose - 1),
             },
             args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
         )
diff --git a/sklearn/linear_model/_glm/glm.py b/sklearn/linear_model/_glm/glm.py
index 7f138f420dc36..c2fc1a221a9e3 100644
--- a/sklearn/linear_model/_glm/glm.py
+++ b/sklearn/linear_model/_glm/glm.py
@@ -21,6 +21,7 @@
 from ...utils import check_array
 from ...utils._openmp_helpers import _openmp_effective_n_threads
 from ...utils._param_validation import Hidden, Interval, StrOptions
+from ...utils.fixes import _get_lbfgs_iprint_options_dict
 from ...utils.optimize import _check_optimize_result
 from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data
 from .._linear_loss import LinearModelLoss
@@ -273,12 +274,12 @@ def fit(self, X, y, sample_weight=None):
                 options={
                     "maxiter": self.max_iter,
                     "maxls": 50,  # default is 20
-                    "iprint": self.verbose - 1,
                     "gtol": self.tol,
                     # The constant 64 was found empirically to pass the test suite.
                     # The point is that ftol is very small, but a bit larger than
                     # machine precision for float64, which is the dtype used by lbfgs.
                     "ftol": 64 * np.finfo(float).eps,
+                    **_get_lbfgs_iprint_options_dict(self.verbose - 1),
                 },
                 args=(X, y, sample_weight, l2_reg_strength, n_threads),
             )
diff --git a/sklearn/linear_model/_huber.py b/sklearn/linear_model/_huber.py
index 51f24035a3c83..09d03520fe6e3 100644
--- a/sklearn/linear_model/_huber.py
+++ b/sklearn/linear_model/_huber.py
@@ -10,6 +10,7 @@
 from ..utils._mask import axis0_safe_slice
 from ..utils._param_validation import Interval
 from ..utils.extmath import safe_sparse_dot
+from ..utils.fixes import _get_lbfgs_iprint_options_dict
 from ..utils.optimize import _check_optimize_result
 from ..utils.validation import _check_sample_weight, validate_data
 from ._base import LinearModel
@@ -329,7 +330,11 @@ def fit(self, X, y, sample_weight=None):
             method="L-BFGS-B",
             jac=True,
             args=(X, y, self.epsilon, self.alpha, sample_weight),
-            options={"maxiter": self.max_iter, "gtol": self.tol, "iprint": -1},
+            options={
+                "maxiter": self.max_iter,
+                "gtol": self.tol,
+                **_get_lbfgs_iprint_options_dict(-1),
+            },
             bounds=bounds,
         )
 
diff --git a/sklearn/linear_model/_logistic.py b/sklearn/linear_model/_logistic.py
index b85c01ee69f9e..f2a9972500f65 100644
--- a/sklearn/linear_model/_logistic.py
+++ b/sklearn/linear_model/_logistic.py
@@ -30,6 +30,7 @@
 )
 from ..utils._param_validation import Hidden, Interval, StrOptions
 from ..utils.extmath import row_norms, softmax
+from ..utils.fixes import _get_lbfgs_iprint_options_dict
 from ..utils.metadata_routing import (
     MetadataRouter,
     MethodMapping,
@@ -464,9 +465,9 @@ def _logistic_regression_path(
                 options={
                     "maxiter": max_iter,
                     "maxls": 50,  # default is 20
-                    "iprint": iprint,
                     "gtol": tol,
                     "ftol": 64 * np.finfo(float).eps,
+                    **_get_lbfgs_iprint_options_dict(iprint),
                 },
             )
             n_iter_i = _check_optimize_result(
diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py
index a8a00fe3b4ac5..42eecd23c88a5 100644
--- a/sklearn/neural_network/_multilayer_perceptron.py
+++ b/sklearn/neural_network/_multilayer_perceptron.py
@@ -31,6 +31,7 @@
 )
 from ..utils._param_validation import Interval, Options, StrOptions
 from ..utils.extmath import safe_sparse_dot
+from ..utils.fixes import _get_lbfgs_iprint_options_dict
 from ..utils.metaestimators import available_if
 from ..utils.multiclass import (
     _check_partial_fit_first_call,
@@ -585,8 +586,8 @@ def _fit_lbfgs(
             options={
                 "maxfun": self.max_fun,
                 "maxiter": self.max_iter,
-                "iprint": iprint,
                 "gtol": self.tol,
+                **_get_lbfgs_iprint_options_dict(iprint),
             },
             args=(
                 X,
diff --git a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py
index d85ef82680bbb..cdd46c15b3272 100644
--- a/sklearn/utils/fixes.py
+++ b/sklearn/utils/fixes.py
@@ -392,3 +392,13 @@ def _in_unstable_openblas_configuration():
         # See discussions in https://github.com/numpy/numpy/issues/19411
         return True  # pragma: no cover
     return False
+
+
+# TODO: Remove when Scipy 1.15 is the minimum supported version
+# In scipy 1.15, wwhen LBFGS was converted from Fortran to C the internal info
+# details (via 'iprint' options key) were dropped, see
+# https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035.
+# For scipy 1.15, iprint has no effect and for scipy >= 1.16 a
+# DeprecationWarning is emitted.
+def _get_lbfgs_iprint_options_dict(iprint_value):
+    return {} if sp_version >= parse_version("1.15") else {"iprint": iprint_value}

From c2317c697fcc39385e7085df0f99a603e14f3366 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Tue, 24 Jun 2025 11:52:54 +0200
Subject: [PATCH 2/7] [azure parallel] [scipy-dev] more fixes

---
 sklearn/linear_model/_glm/_newton_solver.py      | 4 ++--
 sklearn/linear_model/_glm/glm.py                 | 4 ++--
 sklearn/linear_model/_huber.py                   | 4 ++--
 sklearn/linear_model/_logistic.py                | 4 ++--
 sklearn/neighbors/_nca.py                        | 5 ++++-
 sklearn/neural_network/_multilayer_perceptron.py | 4 ++--
 sklearn/utils/fixes.py                           | 4 ++--
 7 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/sklearn/linear_model/_glm/_newton_solver.py b/sklearn/linear_model/_glm/_newton_solver.py
index 7d89c4a5d76e1..944789d5fec8a 100644
--- a/sklearn/linear_model/_glm/_newton_solver.py
+++ b/sklearn/linear_model/_glm/_newton_solver.py
@@ -14,7 +14,7 @@
 
 from ..._loss.loss import HalfSquaredError
 from ...exceptions import ConvergenceWarning
-from ...utils.fixes import _get_lbfgs_iprint_options_dict
+from ...utils.fixes import _get_lbfgs_options_dict
 from ...utils.optimize import _check_optimize_result
 from .._linear_loss import LinearModelLoss
 
@@ -190,7 +190,7 @@ def fallback_lbfgs_solve(self, X, y, sample_weight):
                 "maxls": 50,  # default is 20
                 "gtol": self.tol,
                 "ftol": 64 * np.finfo(np.float64).eps,
-                **_get_lbfgs_iprint_options_dict(self.verbose - 1),
+                **_get_lbfgs_options_dict("iprint", self.verbose - 1),
             },
             args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
         )
diff --git a/sklearn/linear_model/_glm/glm.py b/sklearn/linear_model/_glm/glm.py
index c2fc1a221a9e3..eba379f219350 100644
--- a/sklearn/linear_model/_glm/glm.py
+++ b/sklearn/linear_model/_glm/glm.py
@@ -21,7 +21,7 @@
 from ...utils import check_array
 from ...utils._openmp_helpers import _openmp_effective_n_threads
 from ...utils._param_validation import Hidden, Interval, StrOptions
-from ...utils.fixes import _get_lbfgs_iprint_options_dict
+from ...utils.fixes import _get_lbfgs_options_dict
 from ...utils.optimize import _check_optimize_result
 from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data
 from .._linear_loss import LinearModelLoss
@@ -279,7 +279,7 @@ def fit(self, X, y, sample_weight=None):
                     # The point is that ftol is very small, but a bit larger than
                     # machine precision for float64, which is the dtype used by lbfgs.
                     "ftol": 64 * np.finfo(float).eps,
-                    **_get_lbfgs_iprint_options_dict(self.verbose - 1),
+                    **_get_lbfgs_options_dict("iprint", self.verbose - 1),
                 },
                 args=(X, y, sample_weight, l2_reg_strength, n_threads),
             )
diff --git a/sklearn/linear_model/_huber.py b/sklearn/linear_model/_huber.py
index 09d03520fe6e3..5528f282ff563 100644
--- a/sklearn/linear_model/_huber.py
+++ b/sklearn/linear_model/_huber.py
@@ -10,7 +10,7 @@
 from ..utils._mask import axis0_safe_slice
 from ..utils._param_validation import Interval
 from ..utils.extmath import safe_sparse_dot
-from ..utils.fixes import _get_lbfgs_iprint_options_dict
+from ..utils.fixes import _get_lbfgs_options_dict
 from ..utils.optimize import _check_optimize_result
 from ..utils.validation import _check_sample_weight, validate_data
 from ._base import LinearModel
@@ -333,7 +333,7 @@ def fit(self, X, y, sample_weight=None):
             options={
                 "maxiter": self.max_iter,
                 "gtol": self.tol,
-                **_get_lbfgs_iprint_options_dict(-1),
+                **_get_lbfgs_options_dict("iprint", -1),
             },
             bounds=bounds,
         )
diff --git a/sklearn/linear_model/_logistic.py b/sklearn/linear_model/_logistic.py
index f2a9972500f65..a7137aa585982 100644
--- a/sklearn/linear_model/_logistic.py
+++ b/sklearn/linear_model/_logistic.py
@@ -30,7 +30,7 @@
 )
 from ..utils._param_validation import Hidden, Interval, StrOptions
 from ..utils.extmath import row_norms, softmax
-from ..utils.fixes import _get_lbfgs_iprint_options_dict
+from ..utils.fixes import _get_lbfgs_options_dict
 from ..utils.metadata_routing import (
     MetadataRouter,
     MethodMapping,
@@ -467,7 +467,7 @@ def _logistic_regression_path(
                     "maxls": 50,  # default is 20
                     "gtol": tol,
                     "ftol": 64 * np.finfo(float).eps,
-                    **_get_lbfgs_iprint_options_dict(iprint),
+                    **_get_lbfgs_options_dict("iprint", iprint),
                 },
             )
             n_iter_i = _check_optimize_result(
diff --git a/sklearn/neighbors/_nca.py b/sklearn/neighbors/_nca.py
index a4ef3c303b851..c89742edb115a 100644
--- a/sklearn/neighbors/_nca.py
+++ b/sklearn/neighbors/_nca.py
@@ -25,6 +25,7 @@
 from ..preprocessing import LabelEncoder
 from ..utils._param_validation import Interval, StrOptions
 from ..utils.extmath import softmax
+from ..utils.fixes import _get_lbfgs_options_dict
 from ..utils.multiclass import check_classification_targets
 from ..utils.random import check_random_state
 from ..utils.validation import check_array, check_is_fitted, validate_data
@@ -312,7 +313,9 @@ def fit(self, X, y):
             "jac": True,
             "x0": transformation,
             "tol": self.tol,
-            "options": dict(maxiter=self.max_iter, disp=disp),
+            "options": dict(
+                maxiter=self.max_iter, **_get_lbfgs_options_dict("disp", disp)
+            ),
             "callback": self._callback,
         }
 
diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py
index 42eecd23c88a5..7b8dc5d7e2395 100644
--- a/sklearn/neural_network/_multilayer_perceptron.py
+++ b/sklearn/neural_network/_multilayer_perceptron.py
@@ -31,7 +31,7 @@
 )
 from ..utils._param_validation import Interval, Options, StrOptions
 from ..utils.extmath import safe_sparse_dot
-from ..utils.fixes import _get_lbfgs_iprint_options_dict
+from ..utils.fixes import _get_lbfgs_options_dict
 from ..utils.metaestimators import available_if
 from ..utils.multiclass import (
     _check_partial_fit_first_call,
@@ -587,7 +587,7 @@ def _fit_lbfgs(
                 "maxfun": self.max_fun,
                 "maxiter": self.max_iter,
                 "gtol": self.tol,
-                **_get_lbfgs_iprint_options_dict(iprint),
+                **_get_lbfgs_options_dict("iprint", iprint),
             },
             args=(
                 X,
diff --git a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py
index cdd46c15b3272..da010600c7649 100644
--- a/sklearn/utils/fixes.py
+++ b/sklearn/utils/fixes.py
@@ -400,5 +400,5 @@ def _in_unstable_openblas_configuration():
 # https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035.
 # For scipy 1.15, iprint has no effect and for scipy >= 1.16 a
 # DeprecationWarning is emitted.
-def _get_lbfgs_iprint_options_dict(iprint_value):
-    return {} if sp_version >= parse_version("1.15") else {"iprint": iprint_value}
+def _get_lbfgs_options_dict(key, iprint_value):
+    return {} if sp_version >= parse_version("1.15") else {key: iprint_value}

From f51b05ba7ad2be52840a9f0134e1b00ba4ff2813 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Tue, 24 Jun 2025 14:12:54 +0200
Subject: [PATCH 3/7] [azure parallel] [scipy-dev] tweaks

---
 sklearn/linear_model/_glm/_newton_solver.py      |  4 ++--
 sklearn/linear_model/_glm/glm.py                 |  4 ++--
 sklearn/linear_model/_huber.py                   |  4 ++--
 sklearn/linear_model/_logistic.py                |  4 ++--
 sklearn/neighbors/_nca.py                        |  5 +++--
 sklearn/neural_network/_multilayer_perceptron.py |  4 ++--
 sklearn/utils/fixes.py                           | 14 +++++++-------
 7 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/sklearn/linear_model/_glm/_newton_solver.py b/sklearn/linear_model/_glm/_newton_solver.py
index 944789d5fec8a..24085f903882f 100644
--- a/sklearn/linear_model/_glm/_newton_solver.py
+++ b/sklearn/linear_model/_glm/_newton_solver.py
@@ -14,7 +14,7 @@
 
 from ..._loss.loss import HalfSquaredError
 from ...exceptions import ConvergenceWarning
-from ...utils.fixes import _get_lbfgs_options_dict
+from ...utils.fixes import _get_additional_lbfgs_options_dict
 from ...utils.optimize import _check_optimize_result
 from .._linear_loss import LinearModelLoss
 
@@ -190,7 +190,7 @@ def fallback_lbfgs_solve(self, X, y, sample_weight):
                 "maxls": 50,  # default is 20
                 "gtol": self.tol,
                 "ftol": 64 * np.finfo(np.float64).eps,
-                **_get_lbfgs_options_dict("iprint", self.verbose - 1),
+                **_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
             },
             args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
         )
diff --git a/sklearn/linear_model/_glm/glm.py b/sklearn/linear_model/_glm/glm.py
index eba379f219350..8ba24878b95b2 100644
--- a/sklearn/linear_model/_glm/glm.py
+++ b/sklearn/linear_model/_glm/glm.py
@@ -21,7 +21,7 @@
 from ...utils import check_array
 from ...utils._openmp_helpers import _openmp_effective_n_threads
 from ...utils._param_validation import Hidden, Interval, StrOptions
-from ...utils.fixes import _get_lbfgs_options_dict
+from ...utils.fixes import _get_additional_lbfgs_options_dict
 from ...utils.optimize import _check_optimize_result
 from ...utils.validation import _check_sample_weight, check_is_fitted, validate_data
 from .._linear_loss import LinearModelLoss
@@ -279,7 +279,7 @@ def fit(self, X, y, sample_weight=None):
                     # The point is that ftol is very small, but a bit larger than
                     # machine precision for float64, which is the dtype used by lbfgs.
                     "ftol": 64 * np.finfo(float).eps,
-                    **_get_lbfgs_options_dict("iprint", self.verbose - 1),
+                    **_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
                 },
                 args=(X, y, sample_weight, l2_reg_strength, n_threads),
             )
diff --git a/sklearn/linear_model/_huber.py b/sklearn/linear_model/_huber.py
index 5528f282ff563..87e735ec998db 100644
--- a/sklearn/linear_model/_huber.py
+++ b/sklearn/linear_model/_huber.py
@@ -10,7 +10,7 @@
 from ..utils._mask import axis0_safe_slice
 from ..utils._param_validation import Interval
 from ..utils.extmath import safe_sparse_dot
-from ..utils.fixes import _get_lbfgs_options_dict
+from ..utils.fixes import _get_additional_lbfgs_options_dict
 from ..utils.optimize import _check_optimize_result
 from ..utils.validation import _check_sample_weight, validate_data
 from ._base import LinearModel
@@ -333,7 +333,7 @@ def fit(self, X, y, sample_weight=None):
             options={
                 "maxiter": self.max_iter,
                 "gtol": self.tol,
-                **_get_lbfgs_options_dict("iprint", -1),
+                **_get_additional_lbfgs_options_dict("iprint", -1),
             },
             bounds=bounds,
         )
diff --git a/sklearn/linear_model/_logistic.py b/sklearn/linear_model/_logistic.py
index a7137aa585982..2c564bb1a8b5a 100644
--- a/sklearn/linear_model/_logistic.py
+++ b/sklearn/linear_model/_logistic.py
@@ -30,7 +30,7 @@
 )
 from ..utils._param_validation import Hidden, Interval, StrOptions
 from ..utils.extmath import row_norms, softmax
-from ..utils.fixes import _get_lbfgs_options_dict
+from ..utils.fixes import _get_additional_lbfgs_options_dict
 from ..utils.metadata_routing import (
     MetadataRouter,
     MethodMapping,
@@ -467,7 +467,7 @@ def _logistic_regression_path(
                     "maxls": 50,  # default is 20
                     "gtol": tol,
                     "ftol": 64 * np.finfo(float).eps,
-                    **_get_lbfgs_options_dict("iprint", iprint),
+                    **_get_additional_lbfgs_options_dict("iprint", iprint),
                 },
             )
             n_iter_i = _check_optimize_result(
diff --git a/sklearn/neighbors/_nca.py b/sklearn/neighbors/_nca.py
index c89742edb115a..8383f95338932 100644
--- a/sklearn/neighbors/_nca.py
+++ b/sklearn/neighbors/_nca.py
@@ -25,7 +25,7 @@
 from ..preprocessing import LabelEncoder
 from ..utils._param_validation import Interval, StrOptions
 from ..utils.extmath import softmax
-from ..utils.fixes import _get_lbfgs_options_dict
+from ..utils.fixes import _get_additional_lbfgs_options_dict
 from ..utils.multiclass import check_classification_targets
 from ..utils.random import check_random_state
 from ..utils.validation import check_array, check_is_fitted, validate_data
@@ -314,7 +314,8 @@ def fit(self, X, y):
             "x0": transformation,
             "tol": self.tol,
             "options": dict(
-                maxiter=self.max_iter, **_get_lbfgs_options_dict("disp", disp)
+                maxiter=self.max_iter,
+                **_get_additional_lbfgs_options_dict("disp", disp),
             ),
             "callback": self._callback,
         }
diff --git a/sklearn/neural_network/_multilayer_perceptron.py b/sklearn/neural_network/_multilayer_perceptron.py
index 7b8dc5d7e2395..e8260164202e6 100644
--- a/sklearn/neural_network/_multilayer_perceptron.py
+++ b/sklearn/neural_network/_multilayer_perceptron.py
@@ -31,7 +31,7 @@
 )
 from ..utils._param_validation import Interval, Options, StrOptions
 from ..utils.extmath import safe_sparse_dot
-from ..utils.fixes import _get_lbfgs_options_dict
+from ..utils.fixes import _get_additional_lbfgs_options_dict
 from ..utils.metaestimators import available_if
 from ..utils.multiclass import (
     _check_partial_fit_first_call,
@@ -587,7 +587,7 @@ def _fit_lbfgs(
                 "maxfun": self.max_fun,
                 "maxiter": self.max_iter,
                 "gtol": self.tol,
-                **_get_lbfgs_options_dict("iprint", iprint),
+                **_get_additional_lbfgs_options_dict("iprint", iprint),
             },
             args=(
                 X,
diff --git a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py
index da010600c7649..9edf81293c0cc 100644
--- a/sklearn/utils/fixes.py
+++ b/sklearn/utils/fixes.py
@@ -394,11 +394,11 @@ def _in_unstable_openblas_configuration():
     return False
 
 
-# TODO: Remove when Scipy 1.15 is the minimum supported version
-# In scipy 1.15, wwhen LBFGS was converted from Fortran to C the internal info
-# details (via 'iprint' options key) were dropped, see
-# https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035.
-# For scipy 1.15, iprint has no effect and for scipy >= 1.16 a
+# TODO: Remove when Scipy 1.15 is the minimum supported version In scipy 1.15,
+# the internal info details (via 'iprint' and 'disp' options) were dropped,
+# following the LBFGS rewrite from Fortran to C, see
+# https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035. For
+# scipy 1.15, 'iprint' and 'disp' have no effect and for scipy >= 1.16 a
 # DeprecationWarning is emitted.
-def _get_lbfgs_options_dict(key, iprint_value):
-    return {} if sp_version >= parse_version("1.15") else {key: iprint_value}
+def _get_additional_lbfgs_options_dict(key, value):
+    return {} if sp_version >= parse_version("1.15") else {key: value}

From a799c27aa0dc1213d42e5154bbac50cdfb4240c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Thu, 26 Jun 2025 09:58:52 +0200
Subject: [PATCH 4/7] Update sklearn/utils/fixes.py

Co-authored-by: Tim Head
---
 sklearn/utils/fixes.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sklearn/utils/fixes.py b/sklearn/utils/fixes.py
index 9edf81293c0cc..5d807188e71ee 100644
--- a/sklearn/utils/fixes.py
+++ b/sklearn/utils/fixes.py
@@ -394,7 +394,7 @@ def _in_unstable_openblas_configuration():
     return False
 
 
-# TODO: Remove when Scipy 1.15 is the minimum supported version In scipy 1.15,
+# TODO: Remove when Scipy 1.15 is the minimum supported version. In scipy 1.15,
 # the internal info details (via 'iprint' and 'disp' options) were dropped,
 # following the LBFGS rewrite from Fortran to C, see
 # https://github.com/scipy/scipy/issues/23186#issuecomment-2987801035. For

From 2c7010b63061947ae89b84bf85590a63b9c4d3fb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Thu, 26 Jun 2025 09:59:29 +0200
Subject: [PATCH 5/7] [azure parallel] trigger CI

From 5df5d98831694e8c7e6fe2b423f17f2269e9841b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Thu, 26 Jun 2025 10:13:04 +0200
Subject: [PATCH 6/7] [azure parallel] [scipy-dev] trigger scipy-dev CI

From e830d8f7200e7e5bba9c937bd36a06a24edcf3d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Est=C3=A8ve?=
Date: Thu, 26 Jun 2025 11:27:37 +0200
Subject: [PATCH 7/7] [azure parallel] [scipy-dev] trigger scipy-dev CI after
 fixing conflicts
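For reference, the version-gated options pattern that the series converges on can be exercised outside scikit-learn. The sketch below is illustrative only and not part of the patch series: the helper body mirrors the final _get_additional_lbfgs_options_dict from sklearn/utils/fixes.py, while the use of packaging.version (in place of sklearn's vendored parse_version and sp_version) and the toy quadratic objective are assumptions made purely for the example.

import numpy as np
import scipy
import scipy.optimize
from packaging.version import parse as parse_version

# Parsed scipy version, standing in for sp_version from sklearn/utils/fixes.py.
sp_version = parse_version(scipy.__version__)


def _get_additional_lbfgs_options_dict(key, value):
    # scipy 1.15 rewrote L-BFGS-B from Fortran to C and dropped the internal
    # iteration printing: 'iprint'/'disp' are ignored in 1.15 and emit a
    # DeprecationWarning from 1.16 on, so only pass them to older scipy.
    return {} if sp_version >= parse_version("1.15") else {key: value}


def loss_grad(w):
    # Toy quadratic objective returning (value, gradient); jac=True below
    # tells scipy that the callable provides both.
    return 0.5 * np.dot(w, w), w


opt_res = scipy.optimize.minimize(
    loss_grad,
    np.ones(3),
    method="L-BFGS-B",
    jac=True,
    options={
        "maxiter": 100,
        "gtol": 1e-8,
        # Splats to nothing on scipy >= 1.15, so no deprecated key is passed.
        **_get_additional_lbfgs_options_dict("iprint", -1),
    },
)
print(opt_res.x)  # close to [0.0, 0.0, 0.0]

Splatting the helper into the options dict keeps each call site to a single expression instead of branching on the scipy version around every minimize call, which is why the patches above touch only one or two lines per estimator.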