
Merge branch 'master' into compathelper/new_version/2024-01-06-00-21-36-915-01305798762

Vaibhavdixit02 authored Jan 7, 2024
2 parents 48eac56 + b51d717 commit 0c0bfdf
Showing 16 changed files with 76 additions and 45 deletions.
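The substantive change in this merge is an `OptimizationState` field rename (`iteration → iter`, `gradient → grad`, `hessian → hess`, `solver_state → original`) propagated through the solver wrappers and tests, plus new docstrings for `OptimizationStats` and `OptimizationState`; the remainder is docs compat bumps and a CI badge fix.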
2 changes: 1 addition & 1 deletion README.md
@@ -4,7 +4,7 @@
 [![Global Docs](https://img.shields.io/badge/docs-SciML-blue.svg)](https://docs.sciml.ai/Optimization/stable/)

 [![codecov](https://codecov.io/gh/SciML/Optimization.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/SciML/Optimization.jl)
-[![Build Status](https://github.com/SciML/Optimization.jl/workflows/CI/badge.svg)](https://github.com/SciML/Optimization.jl/actions/workflows/CI.yml?query=branch%3Amaster++)
+[![Build Status](https://github.com/SciML/Optimization.jl/workflows/CI/badge.svg)](https://github.com/SciML/Optimization.jl/actions?query=workflow%3ACI)

 [![ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://img.shields.io/badge/ColPrac-Contributor%27s%20Guide-blueviolet)](https://github.com/SciML/ColPrac)
 [![SciML Code Style](https://img.shields.io/static/v1?label=code%20style&message=SciML&color=9558b2&labelColor=389826)](https://github.com/SciML/SciMLStyle)
8 changes: 4 additions & 4 deletions docs/Project.toml
@@ -46,16 +46,16 @@ Juniper = "0.9"
 ModelingToolkit = ">= 8.11.0"
 NLopt = "0.6, 1"
 Optimization = "3"
-OptimizationBBO = "0.1"
+OptimizationBBO = "0.1, 0.2"
 OptimizationCMAEvolutionStrategy = "0.1"
 OptimizationEvolutionary = "0.1"
-OptimizationGCMAES = "0.1"
+OptimizationGCMAES = "0.1, 0.2"
 OptimizationMOI = "0.1, 0.2"
 OptimizationMetaheuristics = "0.1, 0.2"
-OptimizationMultistartOptimization = "0.1"
+OptimizationMultistartOptimization = "0.1, 0.2"
 OptimizationNLopt = "0.1"
 OptimizationNOMAD = "0.1"
-OptimizationOptimJL = "0.1"
+OptimizationOptimJL = "0.1, 0.2"
 OptimizationOptimisers = "0.1"
 OptimizationPRIMA = "0.0.1"
 OptimizationPolyalgorithms = "0.1"
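For context: in Julia's Pkg compat notation, an entry such as `OptimizationBBO = "0.1, 0.2"` is a union of caret specifiers, so it admits any release in `[0.1.0, 0.3.0)`; these bumps let the docs environment pick up the newly released 0.2 series of each sub-package without dropping 0.1 support.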
2 changes: 1 addition & 1 deletion docs/src/tutorials/minibatch.md
@@ -28,7 +28,7 @@ function dudt_(u, p, t)
     re(p)(u) .* u
 end
-callback = function (p, l, pred; doplot = false) #callback function to observe training
+callback = function (state, l, pred; doplot = false) #callback function to observe training
     display(l)
     # plot current prediction against data
     if doplot
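The rename above reflects the new callback convention: the first argument is now an `Optimization.OptimizationState` rather than the raw parameter vector. A minimal sketch of a callback written against that convention (the printed message is illustrative; field names follow the renames later in this commit):

```julia
# Minimal callback under the new convention: `state` is an
# Optimization.OptimizationState, not the raw parameters.
callback = function (state, l, pred; doplot = false)
    println("iter $(state.iter): loss = $l")  # illustrative logging
    return false  # return `true` to halt the run early
end
```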
5 changes: 3 additions & 2 deletions lib/OptimizationBBO/src/OptimizationBBO.jl
@@ -117,10 +117,11 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
     else
         n_steps = BlackBoxOptim.num_steps(trace)
         curr_u = decompose_trace(trace, cache.progress)
-        opt_state = Optimization.OptimizationState(iteration = n_steps,
+        opt_state = Optimization.OptimizationState(;
+            iter = n_steps,
             u = curr_u,
             objective = x[1],
-            solver_state = trace)
+            original = trace)
         cb_call = cache.callback(opt_state, x...)
     end
4 changes: 2 additions & 2 deletions lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl
@@ -78,10 +78,10 @@ function SciMLBase.__solve(cache::OptimizationCache{

     function _cb(opt, y, fvals, perm)
         curr_u = opt.logger.xbest[end]
-        opt_state = Optimization.OptimizationState(; iteration = length(opt.logger.fmedian),
+        opt_state = Optimization.OptimizationState(; iter = length(opt.logger.fmedian),
             u = curr_u,
             objective = opt.logger.fbest[end],
-            solver_state = opt.logger)
+            original = opt.logger)

         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
2 changes: 1 addition & 1 deletion lib/OptimizationCMAEvolutionStrategy/test/runtests.jl
@@ -12,7 +12,7 @@ using Test
 @test 10 * sol.objective < l1

 function cb(state, args...)
-    if state.iteration % 10 == 0
+    if state.iter % 10 == 0
         println(state.u)
     end
     return false
4 changes: 2 additions & 2 deletions lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl
@@ -86,10 +86,10 @@ function SciMLBase.__solve(cache::OptimizationCache{
     function _cb(trace)
         curr_u = decompose_trace(trace).metadata["x"][end]
         opt_state = Optimization.OptimizationState(;
-            iteration = decompose_trace(trace).iteration,
+            iter = decompose_trace(trace).iteration,
             u = curr_u,
             objective = x[1],
-            solver_state = trace)
+            original = trace)
         cb_call = cache.callback(opt_state, trace.value...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
2 changes: 1 addition & 1 deletion lib/OptimizationEvolutionary/test/runtests.jl
@@ -36,7 +36,7 @@ Random.seed!(1234)
 @test sol.objective < l1

 function cb(state, args...)
-    if state.iteration % 10 == 0
+    if state.iter % 10 == 0
         println(state.u)
     end
     return false
10 changes: 5 additions & 5 deletions lib/OptimizationFlux/src/OptimizationFlux.jl
@@ -66,10 +66,10 @@ function SciMLBase.__solve(cache::OptimizationCache{
     for (i, d) in enumerate(data)
         cache.f.grad(G, θ, d...)
         x = cache.f(θ, cache.p, d...)
-        opt_state = Optimization.OptimizationState(; iteration = i,
+        opt_state = Optimization.OptimizationState(; iter = i,
             u = θ,
             objective = x[1],
-            solver_state = opt)
+            original = opt)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process. Please see the sciml_train documentation for information.")
@@ -85,14 +85,14 @@ function SciMLBase.__solve(cache::OptimizationCache{
             min_err = x
             min_θ = copy(θ)
         end
-        if i == maxiters #Last iteration, revert to best.
+        if i == maxiters #Last iter, revert to best.
            opt = min_opt
            x = min_err
            θ = min_θ
-           opt_state = Optimization.OptimizationState(; iteration = i,
+           opt_state = Optimization.OptimizationState(; iter = i,
               u = θ,
               objective = x[1],
-              solver_state = opt)
+              original = opt)
            cache.callback(opt_state, x...)
            break
        end
2 changes: 1 addition & 1 deletion lib/OptimizationFlux/test/runtests.jl
@@ -36,7 +36,7 @@ using Test
 end

 function cb(state, args...)
-    if state.iteration % 10 == 0
+    if state.iter % 10 == 0
         println(state.u)
     end
     return false
2 changes: 1 addition & 1 deletion lib/OptimizationMOI/src/nlp.jl
@@ -218,7 +218,7 @@ function MOI.eval_objective(evaluator::MOIOptimizationNLPEvaluator, x)
     else
         l = evaluator.f(x, evaluator.p)
         evaluator.iteration += 1
-        state = Optimization.OptimizationState(iteration = evaluator.iteration,
+        state = Optimization.OptimizationState(iter = evaluator.iteration,
             u = x,
             objective = l[1])
         evaluator.callback(state, l)
12 changes: 6 additions & 6 deletions lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -135,10 +135,10 @@ function SciMLBase.__solve(cache::OptimizationCache{
     function _cb(trace)
         θ = cache.opt isa Optim.NelderMead ? decompose_trace(trace).metadata["centroid"] :
             decompose_trace(trace).metadata["x"]
-        opt_state = Optimization.OptimizationState(iteration = trace.iteration,
+        opt_state = Optimization.OptimizationState(iter = trace.iteration,
             u = θ,
             objective = x[1],
-            solver_state = trace)
+            original = trace)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
@@ -255,10 +255,10 @@
         θ = !(cache.opt isa Optim.SAMIN) && cache.opt.method == Optim.NelderMead() ?
             decompose_trace(trace).metadata["centroid"] :
             decompose_trace(trace).metadata["x"]
-        opt_state = Optimization.OptimizationState(iteration = trace.iteration,
+        opt_state = Optimization.OptimizationState(iter = trace.iteration,
             u = θ,
             objective = x[1],
-            solver_state = trace)
+            original = trace)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
@@ -342,10 +342,10 @@ function SciMLBase.__solve(cache::OptimizationCache{
     cur, state = iterate(cache.data)

     function _cb(trace)
-        opt_state = Optimization.OptimizationState(iteration = trace.iteration,
+        opt_state = Optimization.OptimizationState(iter = trace.iteration,
             u = decompose_trace(trace).metadata["x"],
             objective = x[1],
-            solver_state = trace)
+            original = trace)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
14 changes: 7 additions & 7 deletions lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
@@ -67,11 +67,11 @@ function SciMLBase.__solve(cache::OptimizationCache{
     for (i, d) in enumerate(data)
         cache.f.grad(G, θ, d...)
         x = cache.f(θ, cache.p, d...)
-        opt_state = Optimization.OptimizationState(iteration = i,
+        opt_state = Optimization.OptimizationState(iter = i,
             u = θ,
             objective = x[1],
-            gradient = G,
-            solver_state = state)
+            grad = G,
+            original = state)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process. Please see the `solve` documentation for information.")
@@ -87,16 +87,16 @@ function SciMLBase.__solve(cache::OptimizationCache{
             min_err = x
             min_θ = copy(θ)
         end
-        if i == maxiters #Last iteration, revert to best.
+        if i == maxiters #Last iter, revert to best.
            opt = min_opt
            x = min_err
            θ = min_θ
            cache.f.grad(G, θ, d...)
-           opt_state = Optimization.OptimizationState(iteration = i,
+           opt_state = Optimization.OptimizationState(iter = i,
               u = θ,
               objective = x[1],
-              gradient = G,
-              solver_state = state)
+              grad = G,
+              original = state)
            cache.callback(opt_state, x...)
            break
        end
6 changes: 3 additions & 3 deletions lib/OptimizationOptimisers/src/sophia.jl
@@ -78,11 +78,11 @@ function SciMLBase.__solve(cache::OptimizationCache{
     for (i, d) in enumerate(data)
         f.grad(gₜ, θ, d...)
         x = cache.f(θ, cache.p, d...)
-        opt_state = Optimization.OptimizationState(; iteration = i,
+        opt_state = Optimization.OptimizationState(; iter = i,
             u = θ,
             objective = first(x),
-            gradient = gₜ,
-            solver_state = nothing)
+            grad = gₜ,
+            original = nothing)
         cb_call = cache.callback(θ, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process. Please see the sciml_train documentation for information.")
2 changes: 1 addition & 1 deletion lib/OptimizationOptimisers/test/runtests.jl
@@ -61,7 +61,7 @@ using Zygote

 prob = OptimizationProblem(optprob, x0, _p)
 function callback(state, l)
-    Optimisers.adjust!(state.solver_state, 0.1 / state.iteration)
+    Optimisers.adjust!(state.original, 0.1 / state.iter)
     return false
 end
 sol = solve(prob,
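This test doubles as a usage pattern: because `state.original` carries the solver's own state (here the Optimisers.jl setup object), a callback can adjust solver internals such as the learning rate mid-run. A sketch of the idea in isolation (the decay schedule and the commented `solve` call are illustrative, not part of this commit):

```julia
# Illustrative learning-rate schedule via the solver state exposed in
# `state.original`: decays eta as 0.1 / iteration.
function lr_callback(state, l)
    Optimisers.adjust!(state.original, 0.1 / max(state.iter, 1))
    return false
end
# sol = solve(prob, Optimisers.Adam(0.1); maxiters = 100, callback = lr_callback)
```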
44 changes: 37 additions & 7 deletions src/stats_state.jl
@@ -1,4 +1,20 @@
+"""
+$(TYPEDEF)
+
+Stores the optimization run's statistics that are returned
+in the `stats` field of the `OptimizationResult`.
+
+## Fields
+- `iterations`: number of iterations
+- `time`: time taken to run the solver
+- `fevals`: number of function evaluations
+- `gevals`: number of gradient evaluations
+- `hevals`: number of Hessian evaluations
+
+All fields default to 0, so a field may read 0 simply because the
+solver does not report that statistic.
+"""
 struct OptimizationStats
     iterations::Int
     time::Float64
@@ -11,16 +27,30 @@ function OptimizationStats(; iterations = 0, time = 0.0, fevals = 0, gevals = 0,
     OptimizationStats(iterations, time, fevals, gevals, hevals)
 end

"""
$(TYPEDEF)
Stores the optimization run's state at the current iteration
and is passed to the callback function as the first argument.
## Fields
- `iter`: current iteration
- `u`: current solution
- `objective`: current objective value
- `gradient`: current gradient
- `hessian`: current hessian
- `original`: if the solver has its own state object then it is stored here
"""
struct OptimizationState{X, O, G, H, S}
iteration::Int
iter::Int
u::X
objective::O
gradient::G
hessian::H
solver_state::S
grad::G
hess::H
original::S
end

function OptimizationState(; iteration = 0, u = nothing, objective = nothing,
gradient = nothing, hessian = nothing, solver_state = nothing)
OptimizationState(iteration, u, objective, gradient, hessian, solver_state)
function OptimizationState(; iter = 0, u = nothing, objective = nothing,
grad = nothing, hess = nothing, original = nothing)
OptimizationState(iter, u, objective, grad, hess, original)
end
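To make the renamed keyword constructor concrete, a small sketch (all values are made up; per the constructor above, unspecified fields default to `nothing`):

```julia
using Optimization

# Hypothetical values, just to show the renamed keywords.
state = Optimization.OptimizationState(; iter = 5, u = [1.0, 2.0],
    objective = 0.25, grad = [0.1, -0.2])
state.iter       # 5
state.original   # nothing (no solver-specific state attached)
```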
