Bump Bijectors compat (#2052)
* CompatHelper: bump compat for Bijectors to 0.13 (keep existing compat)

* Update Project.toml

* Replacement for #2039 (#2040)

* Fix testset for external samplers

* Update abstractmcmc.jl

* Update test/contrib/inference/abstractmcmc.jl

Co-authored-by: Tor Erlend Fjelde <[email protected]>

* Update test/contrib/inference/abstractmcmc.jl

Co-authored-by: Tor Erlend Fjelde <[email protected]>

* Update FillArrays compat to 1.4.1 (#2035)

* Update FillArrays compat to 1.4.0

* Update test compat

* Try to enable ReverseDiff tests

* Update Project.toml

* Update Project.toml

* Bump version

* Revert dependencies on FillArrays (#2042)

* Update Project.toml

* Update Project.toml

* Fix redundant definition of `getstats` (#2044)

* Fix redundant definition of `getstats`

* Update Inference.jl

* Revert "Update Inference.jl"

This reverts commit e4f51c2.

* Bump version

---------

Co-authored-by: Hong Ge <[email protected]>

* Transfer some test utility function into DynamicPPL (#2049)

* Update OptimInterface.jl

* Only run optimisation tests in numerical stage.

* fix function lookup after moving functions

---------

Co-authored-by: Xianda Sun <[email protected]>

* Move Optim support to extension (#2051)

* Move Optim support to extension

* More imports

* Update Project.toml

---------

Co-authored-by: Hong Ge <[email protected]>

---------

Co-authored-by: CompatHelper Julia <[email protected]>
Co-authored-by: haris organtzidis <[email protected]>
Co-authored-by: Tor Erlend Fjelde <[email protected]>
Co-authored-by: David Widmann <[email protected]>
Co-authored-by: Xianda Sun <[email protected]>
Co-authored-by: Cameron Pfiffer <[email protected]>
7 people authored Jul 27, 2023
1 parent: 4ab5939 · commit: 74869f4
Showing 8 changed files with 97 additions and 117 deletions.
16 changes: 12 additions & 4 deletions Project.toml
@@ -1,6 +1,6 @@
name = "Turing"
uuid = "fce5fe82-541a-59a6-adf8-730c64b5f9a0"
version = "0.26.4"
version = "0.27"

[deps]
AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
@@ -16,7 +16,6 @@ DistributionsAD = "ced4e74d-a319-5a8a-b0ac-84af2272839c"
DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8"
EllipticalSliceSampling = "cad2338a-1db2-11e9-3401-43bc07c9ede2"
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
Libtask = "6f1fad26-d15e-5dc8-ae53-837a1d7b8c9f"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -44,20 +43,20 @@ AdvancedMH = "0.6.8, 0.7"
AdvancedPS = "0.4"
AdvancedVI = "0.2"
BangBang = "0.3"
Bijectors = "0.12"
Bijectors = "0.13.2"
DataStructures = "0.18"
Distributions = "0.23.3, 0.24, 0.25"
DistributionsAD = "0.6"
DocStringExtensions = "0.8, 0.9"
DynamicPPL = "0.23"
EllipticalSliceSampling = "0.5, 1"
FillArrays = "=1.0.0"
ForwardDiff = "0.10.3"
Libtask = "0.7, 0.8"
LogDensityProblems = "2"
LogDensityProblemsAD = "1.4"
MCMCChains = "5, 6"
NamedArrays = "0.9"
Optim = "1"
Reexport = "0.2, 1"
Requires = "0.5, 1.0"
SciMLBase = "1.37.1"
@@ -68,3 +67,12 @@ StatsBase = "0.32, 0.33, 0.34"
StatsFuns = "0.8, 0.9, 1"
Tracker = "0.2.3"
julia = "1.7"

+[weakdeps]
+Optim = "429524aa-4258-5aef-a3af-852621145aeb"
+
+[extensions]
+TuringOptimExt = "Optim"
+
+[extras]
+Optim = "429524aa-4258-5aef-a3af-852621145aeb"
92 changes: 47 additions & 45 deletions src/modes/OptimInterface.jl → ext/TuringOptimExt.jl
@@ -1,14 +1,14 @@
-using Setfield
-using DynamicPPL: DefaultContext, LikelihoodContext
-using DynamicPPL: DynamicPPL
-import .Optim
-import .Optim: optimize
-import ..ForwardDiff
-import NamedArrays
-import StatsBase
-import Printf
-import StatsAPI

+module TuringOptimExt
+
+if isdefined(Base, :get_extension)
+import Turing
+import Turing: Distributions, DynamicPPL, ForwardDiff, NamedArrays, Printf, Setfield, Statistics, StatsAPI, StatsBase
+import Optim
+else
+import ..Turing
+import ..Turing: Distributions, DynamicPPL, ForwardDiff, NamedArrays, Printf, Setfield, Statistics, StatsAPI, StatsBase
+import ..Optim
+end

"""
ModeResult{
@@ -23,7 +23,7 @@ A wrapper struct to store various results from a MAP or MLE estimation.
struct ModeResult{
V<:NamedArrays.NamedArray,
O<:Optim.MultivariateOptimizationResults,
M<:OptimLogDensity
M<:Turing.OptimLogDensity
} <: StatsBase.StatisticalModel
"A vector with the resulting point estimates."
values::V
@@ -57,10 +57,10 @@ function StatsBase.coeftable(m::ModeResult; level::Real=0.95)
estimates = m.values.array[:, 1]
stderrors = StatsBase.stderror(m)
zscore = estimates ./ stderrors
p = map(z -> StatsAPI.pvalue(Normal(), z; tail=:both), zscore)
p = map(z -> StatsAPI.pvalue(Distributions.Normal(), z; tail=:both), zscore)

# Confidence interval (CI)
-q = quantile(Normal(), (1 + level) / 2)
+q = Statistics.quantile(Distributions.Normal(), (1 + level) / 2)
ci_low = estimates .- q .* stderrors
ci_high = estimates .+ q .* stderrors

@@ -80,7 +80,7 @@ function StatsBase.informationmatrix(m::ModeResult; hessian_function=ForwardDiff
# Hessian is computed with respect to the untransformed parameters.
linked = DynamicPPL.istrans(m.f.varinfo)
if linked
-@set! m.f.varinfo = invlink!!(m.f.varinfo, m.f.model)
+Setfield.@set! m.f.varinfo = DynamicPPL.invlink!!(m.f.varinfo, m.f.model)
end

# Calculate the Hessian.
Expand All @@ -90,7 +90,7 @@ function StatsBase.informationmatrix(m::ModeResult; hessian_function=ForwardDiff

# Link it back if we invlinked it.
if linked
-@set! m.f.varinfo = link!!(m.f.varinfo, m.f.model)
+Setfield.@set! m.f.varinfo = DynamicPPL.link!!(m.f.varinfo, m.f.model)
end

return NamedArrays.NamedArray(info, (varnames, varnames))
@@ -126,18 +126,18 @@ mle = optimize(model, MLE())
mle = optimize(model, MLE(), NelderMead())
```
"""
-function Optim.optimize(model::Model, ::MLE, options::Optim.Options=Optim.Options(); kwargs...)
+function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, options::Optim.Options=Optim.Options(); kwargs...)
return _mle_optimize(model, options; kwargs...)
end
-function Optim.optimize(model::Model, ::MLE, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...)
+function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...)
return _mle_optimize(model, init_vals, options; kwargs...)
end
-function Optim.optimize(model::Model, ::MLE, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...)
+function Optim.optimize(model::DynamicPPL.Model, ::Turing.MLE, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...)
return _mle_optimize(model, optimizer, options; kwargs...)
end
function Optim.optimize(
-model::Model,
-::MLE,
+model::DynamicPPL.Model,
+::Turing.MLE,
init_vals::AbstractArray,
optimizer::Optim.AbstractOptimizer,
options::Optim.Options=Optim.Options();
@@ -146,9 +146,9 @@ function Optim.optimize(
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end

-function _mle_optimize(model::Model, args...; kwargs...)
-ctx = OptimizationContext(DynamicPPL.LikelihoodContext())
-return _optimize(model, OptimLogDensity(model, ctx), args...; kwargs...)
+function _mle_optimize(model::DynamicPPL.Model, args...; kwargs...)
+ctx = Turing.OptimizationContext(DynamicPPL.LikelihoodContext())
+return _optimize(model, Turing.OptimLogDensity(model, ctx), args...; kwargs...)
end

"""
Expand All @@ -172,18 +172,18 @@ map_est = optimize(model, MAP(), NelderMead())
```
"""

-function Optim.optimize(model::Model, ::MAP, options::Optim.Options=Optim.Options(); kwargs...)
+function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, options::Optim.Options=Optim.Options(); kwargs...)
return _map_optimize(model, options; kwargs...)
end
-function Optim.optimize(model::Model, ::MAP, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...)
+function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, init_vals::AbstractArray, options::Optim.Options=Optim.Options(); kwargs...)
return _map_optimize(model, init_vals, options; kwargs...)
end
-function Optim.optimize(model::Model, ::MAP, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...)
+function Optim.optimize(model::DynamicPPL.Model, ::Turing.MAP, optimizer::Optim.AbstractOptimizer, options::Optim.Options=Optim.Options(); kwargs...)
return _map_optimize(model, optimizer, options; kwargs...)
end
function Optim.optimize(
-model::Model,
-::MAP,
+model::DynamicPPL.Model,
+::Turing.MAP,
init_vals::AbstractArray,
optimizer::Optim.AbstractOptimizer,
options::Optim.Options=Optim.Options();
@@ -192,9 +192,9 @@ function Optim.optimize(
return _map_optimize(model, init_vals, optimizer, options; kwargs...)
end

-function _map_optimize(model::Model, args...; kwargs...)
-ctx = OptimizationContext(DynamicPPL.DefaultContext())
-return _optimize(model, OptimLogDensity(model, ctx), args...; kwargs...)
+function _map_optimize(model::DynamicPPL.Model, args...; kwargs...)
+ctx = Turing.OptimizationContext(DynamicPPL.DefaultContext())
+return _optimize(model, Turing.OptimLogDensity(model, ctx), args...; kwargs...)
end

"""
@@ -203,8 +203,8 @@ end
Estimate a mode, i.e., compute a MLE or MAP estimate.
"""
function _optimize(
-model::Model,
-f::OptimLogDensity,
+model::DynamicPPL.Model,
+f::Turing.OptimLogDensity,
optimizer::Optim.AbstractOptimizer=Optim.LBFGS(),
args...;
kwargs...
@@ -213,8 +213,8 @@ end
end

function _optimize(
-model::Model,
-f::OptimLogDensity,
+model::DynamicPPL.Model,
+f::Turing.OptimLogDensity,
options::Optim.Options=Optim.Options(),
args...;
kwargs...
@@ -223,8 +223,8 @@ end
end

function _optimize(
-model::Model,
-f::OptimLogDensity,
+model::DynamicPPL.Model,
+f::Turing.OptimLogDensity,
init_vals::AbstractArray=DynamicPPL.getparams(f),
options::Optim.Options=Optim.Options(),
args...;
@@ -234,8 +234,8 @@ end
end

function _optimize(
-model::Model,
-f::OptimLogDensity,
+model::DynamicPPL.Model,
+f::Turing.OptimLogDensity,
init_vals::AbstractArray=DynamicPPL.getparams(f),
optimizer::Optim.AbstractOptimizer=Optim.LBFGS(),
options::Optim.Options=Optim.Options(),
@@ -244,8 +244,8 @@
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
-@set! f.varinfo = DynamicPPL.unflatten(f.varinfo, init_vals)
-@set! f.varinfo = DynamicPPL.link!!(f.varinfo, model)
+Setfield.@set! f.varinfo = DynamicPPL.unflatten(f.varinfo, init_vals)
+Setfield.@set! f.varinfo = DynamicPPL.link!!(f.varinfo, model)
init_vals = DynamicPPL.getparams(f)

# Optimize!
@@ -258,10 +258,10 @@

# Get the VarInfo at the MLE/MAP point, and run the model to ensure
# correct dimensionality.
-@set! f.varinfo = DynamicPPL.unflatten(f.varinfo, M.minimizer)
-@set! f.varinfo = invlink!!(f.varinfo, model)
+Setfield.@set! f.varinfo = DynamicPPL.unflatten(f.varinfo, M.minimizer)
+Setfield.@set! f.varinfo = DynamicPPL.invlink!!(f.varinfo, model)
vals = DynamicPPL.getparams(f)
-@set! f.varinfo = link!!(f.varinfo, model)
+Setfield.@set! f.varinfo = DynamicPPL.link!!(f.varinfo, model)

# Make one transition to get the parameter names.
ts = [Turing.Inference.Transition(
@@ -275,3 +275,5 @@

return ModeResult(vmat, M, -M.minimum, f)
end

+end # module
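
The repeated `Setfield.@set!` plus `link!!`/`unflatten`/`invlink!!` sequence in `_optimize` above shuttles parameters between the constrained space users specify and the unconstrained space the optimizer searches. A rough sketch of that round-trip against DynamicPPL's API, using an illustrative one-parameter model (not from this commit):

```julia
using Turing
using DynamicPPL

@model function demo()
    σ ~ truncated(Normal(0, 1); lower=0)   # σ is constrained to be positive
end

model = demo()
vi = DynamicPPL.VarInfo(model)

vi = DynamicPPL.link!!(vi, model)        # constrained -> unconstrained (e.g. log scale)
θ = vi[:]                                # flat vector the optimizer actually sees
vi = DynamicPPL.unflatten(vi, θ)         # write an optimizer iterate back into vi
vi = DynamicPPL.invlink!!(vi, model)     # unconstrained -> constrained for reporting
```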
41 changes: 24 additions & 17 deletions src/Turing.jl
@@ -11,6 +11,12 @@ import AdvancedVI
using DynamicPPL: DynamicPPL, LogDensityFunction
import DynamicPPL: getspace, NoDist, NamedDist
import LogDensityProblems
+import NamedArrays
+import Setfield
+import StatsAPI
+import StatsBase
+
+import Printf
import Random

const PROGRESS = Ref(true)
@@ -48,26 +54,9 @@ using .Inference
include("variational/VariationalInference.jl")
using .Variational

-@init @require DynamicHMC="bbc10e6e-7c05-544b-b16e-64fede858acb" begin
-@eval Inference begin
-import ..DynamicHMC
-
-if isdefined(DynamicHMC, :mcmc_with_warmup)
-include("contrib/inference/dynamichmc.jl")
-else
-error("Please update DynamicHMC, v1.x is no longer supported")
-end
-end
-end
-
include("modes/ModeEstimation.jl")
using .ModeEstimation

-@init @require Optim="429524aa-4258-5aef-a3af-852621145aeb" @eval begin
-include("modes/OptimInterface.jl")
-export optimize
-end
-
###########
# Exports #
###########
@@ -146,4 +135,22 @@ export @model, # modelling
optim_objective,
optim_function,
optim_problem

+function __init__()
+@static if !isdefined(Base, :get_extension)
+@require Optim="429524aa-4258-5aef-a3af-852621145aeb" include("../ext/TuringOptimExt.jl")
+end
+@require DynamicHMC="bbc10e6e-7c05-544b-b16e-64fede858acb" begin
+@eval Inference begin
+import ..DynamicHMC
+
+if isdefined(DynamicHMC, :mcmc_with_warmup)
+include("contrib/inference/dynamichmc.jl")
+else
+error("Please update DynamicHMC, v1.x is no longer supported")
+end
+end
+end
+end
+
end
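
On Julia versions without `Base.get_extension` (pre-1.9), the `__init__` hook above falls back to Requires.jl and `include`s the extension file the first time Optim is loaded, so user code is identical on both sides of the boundary. Sketched, reusing the illustrative `gdemo` model from the Project.toml example earlier:

```julia
using Turing
using Optim
# Julia >= 1.9: Pkg loads the TuringOptimExt package extension.
# Julia <  1.9: the @require hook in Turing.__init__ include()s
#               ext/TuringOptimExt.jl, defining the same methods.
optimize(gdemo([1.0, 2.0]), MLE())   # works the same way in either case
```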
1 change: 0 additions & 1 deletion src/contrib/inference/abstractmcmc.jl
@@ -19,7 +19,6 @@ getparams(transition::AdvancedHMC.Transition) = transition.z.θ
getstats(transition::AdvancedHMC.Transition) = transition.stat

getparams(transition::AdvancedMH.Transition) = transition.params
-getstats(transition) = NamedTuple()

getvarinfo(f::DynamicPPL.LogDensityFunction) = f.varinfo
getvarinfo(f::LogDensityProblemsAD.ADGradientWrapper) = getvarinfo(parent(f))
4 changes: 1 addition & 3 deletions test/Project.toml
@@ -8,7 +8,6 @@ Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
DistributionsAD = "ced4e74d-a319-5a8a-b0ac-84af2272839c"
DynamicHMC = "bbc10e6e-7c05-544b-b16e-64fede858acb"
DynamicPPL = "366bfd00-2699-11ea-058f-f148b4cae6d8"
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
@@ -42,14 +41,13 @@ Distributions = "0.25"
DistributionsAD = "0.6.3"
DynamicHMC = "2.1.6, 3.0"
DynamicPPL = "0.23"
FillArrays = "=1.0.0"
FiniteDifferences = "0.10.8, 0.11, 0.12"
ForwardDiff = "0.10.12 - 0.10.32, 0.10"
LogDensityProblems = "2"
LogDensityProblemsAD = "1.4"
MCMCChains = "5, 6"
NamedArrays = "0.9.4"
Optim = "0.22, 1.0"
Optim = "1"
Optimization = "3.5"
OptimizationOptimJL = "0.1"
PDMats = "0.10, 0.11"
10 changes: 6 additions & 4 deletions test/contrib/inference/abstractmcmc.jl
@@ -41,7 +41,7 @@ function initialize_mh(model)
end

@testset "External samplers" begin
@testset "AdvancedHMC.jl" begin
@turing_testset "AdvancedHMC.jl" begin
for model in DynamicPPL.TestUtils.DEMO_MODELS
# Need some functionality to initialize the sampler.
# TODO: Remove this once the constructors in the respective packages become "lazy".
@@ -52,12 +52,13 @@
5_000;
nadapts=1_000,
discard_initial=1_000,
-rtol=0.2
+rtol=0.2,
+sampler_name="AdvancedHMC"
)
end
end

@testset "AdvancedMH.jl" begin
@turing_testset "AdvancedMH.jl" begin
for model in DynamicPPL.TestUtils.DEMO_MODELS
# Need some functionality to initialize the sampler.
# TODO: Remove this once the constructors in the respective packages become "lazy".
@@ -68,7 +69,8 @@
10_000;
discard_initial=1_000,
thinning=10,
-rtol=0.2
+rtol=0.2,
+sampler_name="AdvancedMH"
)
end
end
