From c1c19fd2c5a11b1642012655bbbf1739c68a94b8 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Tue, 28 Jan 2025 10:08:12 -0500
Subject: [PATCH 1/5] Use lag_h from NLPModels as well to avoid forward mode type issues

---
 .../src/OptimizationNLPModels.jl | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
index 65e67be20..cf9d5166d 100644
--- a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
+++ b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
@@ -1,6 +1,6 @@
 module OptimizationNLPModels
 
-using Reexport
+using Reexport, SparseArrays
 @reexport using NLPModels, Optimization, ADTypes
 
 """
@@ -21,9 +21,20 @@ function SciMLBase.OptimizationFunction(nlpmodel::AbstractNLPModel,
         cons(res, x, p) = NLPModels.cons!(nlpmodel, x, res)
         cons_j(J, x, p) = (J .= NLPModels.jac(nlpmodel, x))
         cons_jvp(Jv, v, x, p) = NLPModels.jprod!(nlpmodel, x, v, Jv)
+        function lag_h(h, θ, σ, λ)
+            H = NLPModels.hess(nlpmodel, θ, λ; obj_weight = σ)
+            k = 0
+            rows, cols, _ = findnz(H)
+            for (i, j) in zip(rows, cols)
+                if i <= j
+                    k += 1
+                    h[k] = H[i, j]
+                end
+            end
+        end
 
         return OptimizationFunction(
-            f, adtype; grad, hess, hv, cons, cons_j, cons_jvp, kwargs...)
+            f, adtype; grad, hess, hv, cons, cons_j, cons_jvp, lag_h, kwargs...)
     end
 
     return OptimizationFunction(f, adtype; grad, hess, hv, kwargs...)

From 3ddaa53b8ad94c924e1a00c8ea677158d293f51d Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Tue, 28 Jan 2025 10:27:14 -0500
Subject: [PATCH 2/5] add sparsearrays dep

---
 lib/OptimizationNLPModels/Project.toml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml
index f72e628b4..24cf6c9de 100644
--- a/lib/OptimizationNLPModels/Project.toml
+++ b/lib/OptimizationNLPModels/Project.toml
@@ -8,22 +8,24 @@ ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 
 [compat]
 ADTypes = "1.7"
 NLPModels = "0.21"
 Optimization = "4"
 Reexport = "1.2"
+SparseArrays = "1.11.0"
 julia = "1.9"
 
 [extras]
+Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
 NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856"
+OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
 OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
-Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
-OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
 
 [targets]
 test = ["Test", "NLPModelsTest", "OptimizationOptimJL", "ReverseDiff", "Zygote", "Ipopt", "OptimizationMOI"]

From 4db79bf70acfb152eab497a3c0e5dcdeb2d07560 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Mon, 10 Mar 2025 21:55:25 -0400
Subject: [PATCH 3/5] more fixes

---
 lib/OptimizationNLPModels/src/OptimizationNLPModels.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
index cf9d5166d..a5513c581 100644
--- a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
+++ b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
@@ -21,10 +21,10 @@ function SciMLBase.OptimizationFunction(nlpmodel::AbstractNLPModel,
         cons(res, x, p) = NLPModels.cons!(nlpmodel, x, res)
         cons_j(J, x, p) = (J .= NLPModels.jac(nlpmodel, x))
         cons_jvp(Jv, v, x, p) = NLPModels.jprod!(nlpmodel, x, v, Jv)
-        function lag_h(h, θ, σ, λ)
+        function lag_h(h, θ, σ, λ, p)
             H = NLPModels.hess(nlpmodel, θ, λ; obj_weight = σ)
             k = 0
-            rows, cols, _ = findnz(H)
+            rows, cols, _ = findnz(H.data)
             for (i, j) in zip(rows, cols)
                 if i <= j
                     k += 1

From 3da7d9fc796c9f5bcf6e69f74077305203ede1a9 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Fri, 14 Mar 2025 14:08:26 -0400
Subject: [PATCH 4/5] relax sparsearrays

---
 lib/OptimizationNLPModels/Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml
index 24cf6c9de..31bc2a94a 100644
--- a/lib/OptimizationNLPModels/Project.toml
+++ b/lib/OptimizationNLPModels/Project.toml
@@ -15,7 +15,7 @@ ADTypes = "1.7"
 NLPModels = "0.21"
 Optimization = "4"
 Reexport = "1.2"
-SparseArrays = "1.11.0"
+SparseArrays = "1"
 julia = "1.9"
 
 [extras]
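Taken together, patches 1-4 make the converted `OptimizationFunction` source its Lagrangian Hessian from NLPModels instead of rebuilding it with the AD backend: `NLPModels.hess(nlpmodel, θ, λ; obj_weight = σ)` evaluates the sparse symmetric matrix `σ*∇²f(θ) + Σᵢ λᵢ*∇²cᵢ(θ)`, and the new `lag_h` callback packs its triangular nonzeros into the flat vector `h` that solvers such as Ipopt consume through OptimizationMOI. Below is a hedged end-to-end sketch of how the feature would be exercised; the `ADNLPModel` test problem, the `AutoForwardDiff` backend, and the Ipopt pairing are illustrative assumptions rather than code from the patches.

```julia
# Hedged usage sketch (not part of the patch series): convert a small constrained
# NLPModel to a SciML OptimizationProblem and solve it with Ipopt, which queries
# the Lagrangian Hessian through the lag_h callback added above.
using ADNLPModels, OptimizationNLPModels, OptimizationMOI, Ipopt

# min (x1 - 1)^2 + 100 * (x2 - x1^2)^2   s.t.   x1^2 + x2^2 <= 1
nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2,  # objective
    [0.5, 0.5],                                                # initial point
    x -> [x[1]^2 + x[2]^2],                                    # constraint values
    [-Inf], [1.0])                                             # constraint bounds

# OptimizationNLPModels maps obj/grad/hess/cons/jac (and now lag_h) onto SciML callbacks.
prob = OptimizationProblem(nlp, AutoForwardDiff())
sol = solve(prob, Ipopt.Optimizer())
```

Because `OptimizationNLPModels` reexports `Optimization` and `ADTypes` (see the `@reexport` line in PATCH 1/5), `solve` and `AutoForwardDiff` need no extra imports; the `OptimizationProblem(nlp, adtype)` constructor is assumed here from the package's converter API.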
From e048bfef5c48cfabf81df142e328f3ee662f9e99 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Wed, 9 Apr 2025 15:18:15 -0400
Subject: [PATCH 5/5] format

---
 .../src/optimization_packages/optimization.md | 40 +++++++++----------
 .../src/OptimizationOptimJL.jl                |  3 +-
 test/diffeqfluxtests.jl                       |  3 +-
 3 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/docs/src/optimization_packages/optimization.md b/docs/src/optimization_packages/optimization.md
index e36728b11..ddd3bf062 100644
--- a/docs/src/optimization_packages/optimization.md
+++ b/docs/src/optimization_packages/optimization.md
@@ -4,28 +4,28 @@ There are some solvers that are available in the Optimization.jl package directl
 
 ## Methods
 
-- `LBFGS`: The popular quasi-Newton method that leverages limited memory BFGS approximation of the inverse of the Hessian. Through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box-constraints.
-  
-  This can also handle arbitrary non-linear constraints through a Augmented Lagrangian method with bounds constraints described in 17.4 of Numerical Optimization by Nocedal and Wright. Thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl.
+  - `LBFGS`: The popular quasi-Newton method that leverages limited memory BFGS approximation of the inverse of the Hessian. Through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box-constraints.
+    
+    This can also handle arbitrary non-linear constraints through a Augmented Lagrangian method with bounds constraints described in 17.4 of Numerical Optimization by Nocedal and Wright. Thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl.
 
-- `Sophia`: Based on the recent paper https://arxiv.org/abs/2305.14342. It incorporates second order information in the form of the diagonal of the Hessian matrix hence avoiding the need to compute the complete hessian. It has been shown to converge faster than other first order methods such as Adam and SGD.
+  - `Sophia`: Based on the recent paper https://arxiv.org/abs/2305.14342.
+    It incorporates second order information in the form of the diagonal of the Hessian matrix hence avoiding the need to compute the complete hessian. It has been shown to converge faster than other first order methods such as Adam and SGD.
 
-  + `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))`
+      + `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))`
 
-  + `η` is the learning rate
-  + `βs` are the decay of momentums
-  + `ϵ` is the epsilon value
-  + `λ` is the weight decay parameter
-  + `k` is the number of iterations to re-compute the diagonal of the Hessian matrix
-  + `ρ` is the momentum
-  + Defaults:
-    
-    * `η = 0.001`
-    * `βs = (0.9, 0.999)`
-    * `ϵ = 1e-8`
-    * `λ = 0.1`
-    * `k = 10`
-    * `ρ = 0.04`
-
+      + `η` is the learning rate
+      + `βs` are the decay of momentums
+      + `ϵ` is the epsilon value
+      + `λ` is the weight decay parameter
+      + `k` is the number of iterations to re-compute the diagonal of the Hessian matrix
+      + `ρ` is the momentum
+      + Defaults:
+        
+          * `η = 0.001`
+          * `βs = (0.9, 0.999)`
+          * `ϵ = 1e-8`
+          * `λ = 0.1`
+          * `k = 10`
+          * `ρ = 0.04`
 
 ## Examples

diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
index 24de50d31..34a2ae679 100644
--- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
+++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -38,13 +38,12 @@ function __map_optimizer_args(cache::OptimizationCache,
         abstol::Union{Number, Nothing} = nothing,
         reltol::Union{Number, Nothing} = nothing,
         kwargs...)
-
     mapped_args = (; extended_trace = true, kwargs...)
 
     if !isnothing(abstol)
         mapped_args = (; mapped_args..., f_abstol = abstol)
     end
-    
+
     if !isnothing(callback)
         mapped_args = (; mapped_args..., callback = callback)
     end

diff --git a/test/diffeqfluxtests.jl b/test/diffeqfluxtests.jl
index 6ec24e2cd..4a6a170c0 100644
--- a/test/diffeqfluxtests.jl
+++ b/test/diffeqfluxtests.jl
@@ -70,7 +70,8 @@ ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps))
 
 dudt2 = Lux.Chain(x -> x .^ 3,
     Lux.Dense(2, 50, tanh),
     Lux.Dense(50, 2))
-prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps, abstol = 1e-8, reltol = 1e-8)
+prob_neuralode = NeuralODE(
+    dudt2, tspan, Tsit5(), saveat = tsteps, abstol = 1e-8, reltol = 1e-8)
 pp, st = Lux.setup(rng, dudt2)
 pp = ComponentArray(pp)
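PATCH 5/5 only reflows the `Sophia` section of the docs, so for reference here is a minimal usage sketch matching the documented `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))` signature. The Rosenbrock objective and the `AutoZygote` backend are illustrative assumptions; every keyword value simply restates the documented default.

```julia
# Hedged sketch of the documented Sophia call; the keyword values are the defaults
# listed in the docs hunk above, written out explicitly.
using Optimization, Zygote

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
prob = OptimizationProblem(optf, zeros(2), [1.0, 100.0])

sol = solve(prob,
    Optimization.Sophia(η = 0.001, βs = (0.9, 0.999), ϵ = 1e-8,
        λ = 0.1, k = 10, ρ = 0.04);
    maxiters = 1000)
```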