From 169d5c23665329c881191e3e85c4459813e07d2d Mon Sep 17 00:00:00 2001 From: Vaibhav Dixit Date: Tue, 7 Nov 2023 13:31:55 -0500 Subject: [PATCH] Add OptimizationFunction conversion and adtype arg and tests --- src/problems/basic_problems.jl | 11 +++++++++-- test/downstream/nllsopt.jl | 29 +++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 test/downstream/nllsopt.jl diff --git a/src/problems/basic_problems.jl b/src/problems/basic_problems.jl index 5b6a6590c..cfed37f16 100644 --- a/src/problems/basic_problems.jl +++ b/src/problems/basic_problems.jl @@ -688,11 +688,18 @@ function OptimizationProblem(f, args...; kwargs...) OptimizationProblem{true}(OptimizationFunction{true}(f), args...; kwargs...) end -function OptimizationProblem(prob::NonlinearLeastSquaresProblem; kwargs...) +function OptimizationFunction(f::NonlinearFunction, adtype::AbstractADType = NoAD(); kwargs...) + if isinplace(f) + throw(ArgumentError("Converting NonlinearFunction to OptimizationFunction is not supported with in-place functions yet.")) + end + OptimizationFunction((u, p) -> sum(abs2, f(u, p)), adtype; kwargs...) +end + +function OptimizationProblem(prob::NonlinearLeastSquaresProblem, adtype::AbstractADType = NoAD(); kwargs...) if isinplace(prob) throw(ArgumentError("Converting NonlinearLeastSquaresProblem to OptimizationProblem is not supported with in-place functions yet.")) end - optf = OptimizationFunction(sum ∘ prob.f, grad = (Jv, u, p) -> prob.f.jvp(Jv, prob.f(u, p), u, p), kwargs...) + optf = OptimizationFunction(prob.f, adtype; kwargs...) return OptimizationProblem(optf, prob.u0, prob.p; prob.kwargs..., kwargs...) 
end diff --git a/test/downstream/nllsopt.jl b/test/downstream/nllsopt.jl new file mode 100644 index 000000000..66f1cc7d9 --- /dev/null +++ b/test/downstream/nllsopt.jl @@ -0,0 +1,29 @@ +using NonlinearSolve, Optimization, OptimizationNLopt, ForwardDiff, Random +import FastLevenbergMarquardt, LeastSquaresOptim + +true_function(x, θ) = @. θ[1] * exp(θ[2] * x) * cos(θ[3] * x + θ[4]) + +θ_true = [1.0, 0.1, 2.0, 0.5] + +x = [-1.0, -0.5, 0.0, 0.5, 1.0] + +y_target = true_function(x, θ_true) + +function loss_function(θ, p) + ŷ = true_function(p, θ) + return ŷ .- y_target +end + +θ_init = θ_true .+ randn!(similar(θ_true)) * 0.1 +prob_oop = NonlinearLeastSquaresProblem{false}(loss_function, θ_init, x) + +solver = LevenbergMarquardt() + +@time sol = solve(prob_oop, solver; maxiters = 10000, abstol = 1e-8) + +optf = OptimizationFunction(prob_oop.f, AutoForwardDiff()) +optprob = OptimizationProblem(optf, prob_oop.u0, prob_oop.p) +@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8) + +optprob = OptimizationProblem(prob_oop, AutoForwardDiff()) +@time sol = solve(optprob, NLopt.LD_LBFGS(); maxiters = 10000, abstol = 1e-8) \ No newline at end of file