Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use DifferentiationInterface for autodiff, allow ADTypes #153

Open
wants to merge 12 commits into
base: master
Choose a base branch
from
6 changes: 5 additions & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,19 @@ uuid = "d41bc354-129a-5804-8e4c-c37616107c6c"
version = "7.8.3"

[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
DiffResults = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
DifferentiationInterface = "a0c0ee7d-e4b9-4e03-894e-1c5f64a51d63"
Distributed = "8ba89e20-285c-5b6f-9357-94700520ee1b"
FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"

[compat]
ADTypes = "1.11.0"
DiffResults = "1.0"
DifferentiationInterface = "0.6.24"
FiniteDiff = "2.0"
ForwardDiff = "0.10"
julia = "1.5"

[extras]
Expand Down
17 changes: 17 additions & 0 deletions src/NLSolversBase.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ __precompile__(true)

module NLSolversBase

using ADTypes: AbstractADType, AutoForwardDiff, AutoFiniteDiff
import DifferentiationInterface as DI
using FiniteDiff, ForwardDiff, DiffResults
import Distributed: clear!
export AbstractObjective,
Expand Down Expand Up @@ -54,9 +56,24 @@ function finitediff_fdtype(autodiff)
fdtype
end

# Translate the legacy `chunk` option into the `chunksize` keyword expected by
# `AutoForwardDiff` (see `get_adtype`): `nothing` means "let ForwardDiff pick",
# while a `ForwardDiff.Chunk{C}` carries an explicit static chunk size `C`.
forwarddiff_chunksize(::Nothing) = nothing
forwarddiff_chunksize(::ForwardDiff.Chunk{C}) where {C} = C

# Does the legacy `autodiff` specifier request a FiniteDiff-based backend?
is_finitediff(autodiff) = autodiff in (:central, :finite, :finiteforward, :finitecomplex)
# Does it request a ForwardDiff-based backend? (`true` is accepted as an alias
# for `:forward`.)
is_forwarddiff(autodiff) = autodiff in (:forward, :forwarddiff, true)

# An ADTypes.jl backend needs no translation: return it as-is. The optional
# `chunk` argument is accepted (and ignored) so callers can uniformly write
# `get_adtype(autodiff, chunk)` regardless of how `autodiff` was specified.
# Without it, a two-argument call with an `AbstractADType` would fall through
# to the generic `get_adtype(autodiff, chunk)` method and raise its
# "not supported" error.
get_adtype(autodiff::AbstractADType, chunk=nothing) = autodiff

"""
    get_adtype(autodiff, chunk=nothing)

Translate the legacy `autodiff` specifier (a `Symbol` such as `:finite` or
`:forward`, or `true`) into an ADTypes.jl backend object usable with
DifferentiationInterface. `chunk` may be `nothing` or a `ForwardDiff.Chunk`
used to set the chunk size of `AutoForwardDiff`.

Throws an error for unrecognized specifiers.
"""
function get_adtype(autodiff, chunk=nothing)
    if is_finitediff(autodiff)
        # `finitediff_fdtype` returns a Val-type; instantiate it for ADTypes.
        return AutoFiniteDiff(; fdtype=finitediff_fdtype(autodiff)())
    elseif is_forwarddiff(autodiff)
        return AutoForwardDiff(; chunksize=forwarddiff_chunksize(chunk))
    else
        # List everything actually accepted, not just :finite and :forward.
        error("The autodiff value $autodiff is not supported. Use an ADTypes.AbstractADType, or one of :central, :finite, :finiteforward, :finitecomplex, :forward, or :forwarddiff.")
    end
end

# Return an array matching the shape and container of `x`, with element type
# `Tf`, where every entry is `Tf(NaN)`.
function x_of_nans(x, Tf=eltype(x))
    nans = Tf.(x)
    return fill!(nans, Tf(NaN))
end

include("objective_types/inplace_factory.jl")
Expand Down
26 changes: 6 additions & 20 deletions src/objective_types/constraints.jl
Original file line number Diff line number Diff line change
Expand Up @@ -139,27 +139,13 @@ function OnceDifferentiableConstraints(c!, lx::AbstractVector, ux::AbstractVecto
xcache = zeros(T, sizex)
ccache = zeros(T, sizec)

if is_finitediff(autodiff)
ccache2 = similar(ccache)
fdtype = finitediff_fdtype(autodiff)
jacobian_cache = FiniteDiff.JacobianCache(xcache, ccache,ccache2,fdtype)
function jfinite!(J, x)
FiniteDiff.finite_difference_jacobian!(J, c!, x, jacobian_cache)
J
end
return OnceDifferentiableConstraints(c!, jfinite!, bounds)
elseif is_forwarddiff(autodiff)
jac_cfg = ForwardDiff.JacobianConfig(c!, ccache, xcache, chunk)
ForwardDiff.checktag(jac_cfg, c!, xcache)

function jforward!(J, x)
ForwardDiff.jacobian!(J, c!, ccache, x, jac_cfg, Val{false}())
J
end
return OnceDifferentiableConstraints(c!, jforward!, bounds)
else
error("The autodiff value $autodiff is not support. Use :finite or :forward.")
backend = get_adtype(autodiff, chunk)
jac_prep = DI.prepare_jacobian(c!, ccache, backend, xcache)
# In-place constraint Jacobian: overwrite `_j` with the Jacobian of `c!` at
# `_x`, reusing the DI preparation and backend computed above. `ccache` is
# clobbered with the constraint values as a side effect of the in-place call.
function j!(_j, _x)
    DI.jacobian!(c!, ccache, _j, jac_prep, backend, _x)
    return _j
end
return OnceDifferentiableConstraints(c!, j!, bounds)
end


Expand Down
115 changes: 20 additions & 95 deletions src/objective_types/oncedifferentiable.jl
Original file line number Diff line number Diff line change
Expand Up @@ -43,37 +43,16 @@ function OnceDifferentiable(f, x_seed::AbstractArray{T},

return OnceDifferentiable(fF, dfF, fdfF, x_seed, F, DF)
else
if is_finitediff(autodiff)

# Figure out which Val-type to use for FiniteDiff based on our
# symbol interface.
fdtype = finitediff_fdtype(autodiff)
df_array_spec = DF
x_array_spec = x_seed
return_spec = typeof(F)
gcache = FiniteDiff.GradientCache(df_array_spec, x_array_spec, fdtype, return_spec)

function g!(storage, x)
FiniteDiff.finite_difference_gradient!(storage, f, x, gcache)
return
end
function fg!(storage, x)
g!(storage, x)
return f(x)
end
elseif is_forwarddiff(autodiff)
gcfg = ForwardDiff.GradientConfig(f, x_seed, chunk)
g! = (out, x) -> ForwardDiff.gradient!(out, f, x, gcfg)

fg! = (out, x) -> begin
gr_res = DiffResults.DiffResult(zero(T), out)
ForwardDiff.gradient!(gr_res, f, x, gcfg)
DiffResults.value(gr_res)
end
else
error("The autodiff value $autodiff is not supported. Use :finite or :forward.")
backend = get_adtype(autodiff, chunk)
grad_prep = DI.prepare_gradient(f, backend, x_seed)
# In-place gradient: overwrite `_g` with the gradient of `f` at `_x`, reusing
# the DI preparation and backend computed above.
function g!(_g, _x)
    DI.gradient!(f, _g, grad_prep, backend, _x)
    return nothing
end
# Fused value-and-gradient: fill `_g` in place and return the objective value
# `f(_x)` (DI returns a `(value, gradient)` tuple; the gradient is `_g`).
function fg!(_g, _x)
    y, _ = DI.value_and_gradient!(f, _g, grad_prep, backend, _x)
    return y
end

return OnceDifferentiable(f, g!, fg!, x_seed, F, DF)
end
end
Expand All @@ -99,72 +78,18 @@ function OnceDifferentiable(f, x_seed::AbstractArray, F::AbstractArray, DF::Abst
fdfF = make_fdf(f, x_seed, F)
return OnceDifferentiable(fF, dfF, fdfF, x_seed, F, DF)
else
if is_finitediff(autodiff)
# Figure out which Val-type to use for FiniteDiff based on our
# symbol interface.
fdtype = finitediff_fdtype(autodiff)
# Apparently only the third input is aliased.
j_finitediff_cache = FiniteDiff.JacobianCache(copy(x_seed), copy(F), copy(F), fdtype)
if autodiff == :finiteforward
# These copies can be done away with if we add a keyword for
# reusing arrays instead for overwriting them.
Fx = copy(F)
DF = copy(DF)

x_f, x_df = x_of_nans(x_seed), x_of_nans(x_seed)
f_calls, j_calls = [0,], [0,]
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we try to preserve this call count? I don't think it makes a lot of sense for autodiff in general, because some backends will not go through the actual code to compute the gradient (unlike ForwardDiff and FiniteDiff).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In addition, I don't think this call count was present for every operator or every autodiff method

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have to look. It's done this way because people complained that it didn't match with what they printed from their objective, but I agree that people will have to calculate the number themselves in the special case of finite diff

function j_finiteforward!(J, x)
# Exploit the possibility that it might be that x_f == x
# then we don't have to call f again.

# if at least one element of x_f is different from x, update
if any(x_f .!= x)
f(Fx, x)
f_calls .+= 1
end

FiniteDiff.finite_difference_jacobian!(J, f, x, j_finitediff_cache, Fx)
end
function fj_finiteforward!(F, J, x)
f(F, x)
FiniteDiff.finite_difference_jacobian!(J, f, x, j_finitediff_cache, F)
end


return OnceDifferentiable(f, j_finiteforward!, fj_finiteforward!, Fx, DF, x_f, x_df, f_calls, j_calls)
end

function fj_finitediff!(F, J, x)
f(F, x)
FiniteDiff.finite_difference_jacobian!(J, f, x, j_finitediff_cache)
F
end
function j_finitediff!(J, x)
F_cache = copy(F)
fj_finitediff!(F_cache, J, x)
end

return OnceDifferentiable(f, j_finitediff!, fj_finitediff!, x_seed, F, DF)

elseif is_forwarddiff(autodiff)

jac_cfg = ForwardDiff.JacobianConfig(f, F, x_seed, chunk)
ForwardDiff.checktag(jac_cfg, f, x_seed)

F2 = copy(F)
function j_forwarddiff!(J, x)
ForwardDiff.jacobian!(J, f, F2, x, jac_cfg, Val{false}())
end
function fj_forwarddiff!(F, J, x)
jac_res = DiffResults.DiffResult(F, J)
ForwardDiff.jacobian!(jac_res, f, F2, x, jac_cfg, Val{false}())
DiffResults.value(jac_res)
end

return OnceDifferentiable(f, j_forwarddiff!, fj_forwarddiff!, x_seed, F, DF)
else
error("The autodiff value $(autodiff) is not supported. Use :finite or :forward.")
F2 = similar(F)
backend = get_adtype(autodiff, chunk)
jac_prep = DI.prepare_jacobian(f, F2, backend, x_seed)
# In-place Jacobian for the in-place residual function `f`: overwrite `_j`
# with the Jacobian at `_x`. `F2` is a scratch output buffer so the caller's
# `F` is not clobbered.
function j!(_j, _x)
    DI.jacobian!(f, F2, _j, jac_prep, backend, _x)
    return _j
end
# Fused value-and-Jacobian: fill both the residual buffer `_y` and the
# Jacobian `_j` in place, returning the residual (first element of DI's
# `(value, jacobian)` tuple).
function fj!(_y, _j, _x)
    y, _ = DI.value_and_jacobian!(f, _y, _j, jac_prep, backend, _x)
    return y
end
return OnceDifferentiable(f, j!, fj!, x_seed, F, DF)
end
end

Expand Down
92 changes: 24 additions & 68 deletions src/objective_types/twicedifferentiable.jl
Original file line number Diff line number Diff line change
Expand Up @@ -54,23 +54,11 @@ function TwiceDifferentiable(f, g,
g! = df!_from_df(g, F, inplace)
fg! = make_fdf(x_seed, F, f, g!)

if is_finitediff(autodiff)

# Figure out which Val-type to use for FiniteDiff based on our
# symbol interface.
fdtype = finitediff_fdtype(autodiff)

jcache = FiniteDiff.JacobianCache(x_seed, fdtype)
function h!(storage, x)
FiniteDiff.finite_difference_jacobian!(storage, g!, x, jcache)
return
end

elseif is_forwarddiff(autodiff)
hcfg = ForwardDiff.HessianConfig(f, copy(x_seed))
h! = (out, x) -> ForwardDiff.hessian!(out, f, x, hcfg)
else
error("The autodiff value $(autodiff) is not supported. Use :finite or :forward.")
backend = get_adtype(autodiff)
hess_prep = DI.prepare_hessian(f, backend, x_seed)
# In-place Hessian: overwrite `_h` with the Hessian of `f` at `_x`, reusing
# the DI preparation and backend computed above.
function h!(_h, _x)
    DI.hessian!(f, _h, hess_prep, backend, _x)
    return _h
end
TwiceDifferentiable(f, g!, fg!, h!, x_seed, F)
end
Expand All @@ -80,63 +68,31 @@ TwiceDifferentiable(d::NonDifferentiable, x_seed::AbstractVector{T} = d.x_f, F::

"""
    TwiceDifferentiable(d::OnceDifferentiable, x_seed=d.x_f, F=real(zero(T)); autodiff=:finite)

Upgrade a `OnceDifferentiable` to a `TwiceDifferentiable` by deriving an
in-place Hessian of `d.f` via DifferentiationInterface. `autodiff` is either
an ADTypes.jl backend or a legacy symbol understood by `get_adtype`. The
existing gradient `d.df` and fused `d.fdf` are reused unchanged.
"""
function TwiceDifferentiable(d::OnceDifferentiable, x_seed::AbstractVector{T} = d.x_f,
                             F::Real = real(zero(T)); autodiff = :finite) where T<:Real
    backend = get_adtype(autodiff)
    # Prepare once at the seed point; h! then reuses the preparation.
    hess_prep = DI.prepare_hessian(d.f, backend, x_seed)
    function h!(_h, _x)
        DI.hessian!(d.f, _h, hess_prep, backend, _x)
        return _h
    end
    return TwiceDifferentiable(d.f, d.df, d.fdf, h!, x_seed, F, gradient(d))
end

function TwiceDifferentiable(f, x::AbstractArray, F::Real = real(zero(eltype(x)));
autodiff = :finite, inplace = true)
if is_finitediff(autodiff)

# Figure out which Val-type to use for FiniteDiff based on our
# symbol interface.
fdtype = finitediff_fdtype(autodiff)
gcache = FiniteDiff.GradientCache(x, x, fdtype)

function g!(storage, x)
FiniteDiff.finite_difference_gradient!(storage, f, x, gcache)
return
end
function fg!(storage, x)
g!(storage, x)
return f(x)
end

function h!(storage, x)
FiniteDiff.finite_difference_hessian!(storage, f, x)
return
end
elseif is_forwarddiff(autodiff)

gcfg = ForwardDiff.GradientConfig(f, x)
g! = (out, x) -> ForwardDiff.gradient!(out, f, x, gcfg)

fg! = (out, x) -> begin
gr_res = DiffResults.DiffResult(zero(eltype(x)), out)
ForwardDiff.gradient!(gr_res, f, x, gcfg)
DiffResults.value(gr_res)
end

hcfg = ForwardDiff.HessianConfig(f, x)
h! = (out, x) -> ForwardDiff.hessian!(out, f, x, hcfg)
else
error("The autodiff value $(autodiff) is not supported. Use :finite or :forward.")
backend = get_adtype(autodiff)
grad_prep = DI.prepare_gradient(f, backend, x)
hess_prep = DI.prepare_hessian(f, backend, x)
function g!(_g, _x)
DI.gradient!(f, _g, grad_prep, backend, _x)
return nothing
end
# Fused value-and-gradient: fill `_g` in place and return the objective value.
# BUG FIX: the original called `DI.gradient!`, which returns only the gradient
# array, so `y, _ = ...` destructured the gradient itself and returned its
# first entry instead of f(x). `DI.value_and_gradient!` returns the
# `(value, gradient)` tuple, matching how `fg!` is built elsewhere in this PR.
function fg!(_g, _x)
    y, _ = DI.value_and_gradient!(f, _g, grad_prep, backend, _x)
    return y
end
function h!(_h, _x)
DI.hessian!(f, _h, hess_prep, backend, _x)
return _h
end
TwiceDifferentiable(f, g!, fg!, h!, x, F)
end
Expand Down
4 changes: 2 additions & 2 deletions test/autodiff.jl
Original file line number Diff line number Diff line change
Expand Up @@ -52,8 +52,8 @@
gx = g(NLSolversBase.alloc_DF(x, 0.0), x)
h(H, x) = copyto!(H, Diagonal(6 .* x))
hx = h(fill(0.0, nx, nx), x)
for dtype in (OnceDifferentiable, TwiceDifferentiable)
for autodiff in (:finite, :forward)
@testset for dtype in (OnceDifferentiable, TwiceDifferentiable)
@testset for autodiff in (:finite, :forward)
# :forward should be exact, but :finite will not be
differentiable = dtype(f, copy(x); autodiff = autodiff)
value!(differentiable, copy(x))
Expand Down
18 changes: 10 additions & 8 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -77,11 +77,13 @@ function exponential_hessian_product!(storage, x)
storage[2, 2] = 2.0 * exp((3.0 - x[1])^2) * (2.0 * x[2]^2 - 12.0 * x[2] + 19)
end

# Run the whole suite under one named testset so per-file results are grouped
# and summarized (`verbose=true` prints a row per nested testset). The pasted
# diff left both the old flat include list and the new testset, which would
# have executed every test file twice; only the testset form is kept.
@testset verbose=true "NLSolversBase.jl" begin
    include("objective_types.jl")
    include("interface.jl")
    include("incomplete.jl")
    include("constraints.jl")
    include("abstractarrays.jl")
    include("autodiff.jl")
    include("sparse.jl")
    include("kwargs.jl")
end