From 623214b44086138b73dc9f151c13d02ee1b10fa8 Mon Sep 17 00:00:00 2001
From: Vaibhav Dixit
Date: Fri, 5 Jan 2024 12:35:00 -0500
Subject: [PATCH] Format and sparsearrays only 1.10

---
 Project.toml                                  |  2 +-
 lib/OptimizationBBO/src/OptimizationBBO.jl    | 10 ++++-
 .../src/OptimizationCMAEvolutionStrategy.jl   | 10 ++++-
 .../test/runtests.jl                          |  2 +-
 .../src/OptimizationEvolutionary.jl           |  7 ++--
 lib/OptimizationEvolutionary/test/runtests.jl |  2 +-
 lib/OptimizationFlux/src/OptimizationFlux.jl  |  2 +-
 .../src/OptimizationGCMAES.jl                 |  8 ++--
 lib/OptimizationMOI/src/nlp.jl                |  4 +-
 .../src/OptimizationOptimJL.jl                | 37 +++++++++++++------
 .../src/OptimizationOptimisers.jl             | 12 +++++-
 lib/OptimizationOptimisers/test/runtests.jl   | 10 +++--
 src/stats_state.jl                            |  8 ++--
 test/diffeqfluxtests.jl                       |  1 -
 14 files changed, 79 insertions(+), 36 deletions(-)

diff --git a/Project.toml b/Project.toml
index 047c8da94..057031c8d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -58,7 +58,7 @@ ProgressLogging = "0.1"
 Reexport = "1.2"
 ReverseDiff = "1.14"
 SciMLBase = "2.16.3"
-SparseArrays = "1.9, 1.10"
+SparseArrays = "1.10"
 SparseDiffTools = "2.14"
 SymbolicIndexingInterface = "0.3"
 Symbolics = "5.12"
diff --git a/lib/OptimizationBBO/src/OptimizationBBO.jl b/lib/OptimizationBBO/src/OptimizationBBO.jl
index e27b1c08f..defeba0c9 100644
--- a/lib/OptimizationBBO/src/OptimizationBBO.jl
+++ b/lib/OptimizationBBO/src/OptimizationBBO.jl
@@ -117,7 +117,10 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
         else
             n_steps = BlackBoxOptim.num_steps(trace)
             curr_u = decompose_trace(trace, cache.progress)
-            opt_state = Optimization.OptimizationState(iteration = n_steps, u = curr_u, objective = x[1], solver_state = trace)
+            opt_state = Optimization.OptimizationState(iteration = n_steps,
+                u = curr_u,
+                objective = x[1],
+                solver_state = trace)
             cb_call = cache.callback(opt_state, x...)
         end
@@ -178,7 +181,10 @@ function SciMLBase.__solve(cache::Optimization.OptimizationCache{
     t1 = time()
     opt_ret = Symbol(opt_res.stop_reason)
-    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations, time = t1 - t0, fevals = opt_res.f_calls)
+    stats = Optimization.OptimizationStats(;
+        iterations = opt_res.iterations,
+        time = t1 - t0,
+        fevals = opt_res.f_calls)
     SciMLBase.build_solution(cache, cache.opt,
         BlackBoxOptim.best_candidate(opt_res),
         BlackBoxOptim.best_fitness(opt_res);
diff --git a/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl b/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl
index c2c5651c0..43a94b99f 100644
--- a/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl
+++ b/lib/OptimizationCMAEvolutionStrategy/src/OptimizationCMAEvolutionStrategy.jl
@@ -22,7 +22,10 @@ function __map_optimizer_args(prob::OptimizationCache, opt::CMAEvolutionStrategy
     end

     mapped_args = (; lower = prob.lb,
-        upper = prob.ub, logger = CMAEvolutionStrategy.BasicLogger(prob.u0; verbosity = 0, callback = callback))
+        upper = prob.ub,
+        logger = CMAEvolutionStrategy.BasicLogger(prob.u0;
+            verbosity = 0,
+            callback = callback))

     if !isnothing(maxiters)
         mapped_args = (; mapped_args..., maxiter = maxiters)
@@ -105,7 +108,10 @@ function SciMLBase.__solve(cache::OptimizationCache{
     t1 = time()
     opt_ret = opt_res.stop.reason
-    stats = Optimization.OptimizationStats(; iterations = length(opt_res.logger.fmedian), time = t1 - t0, fevals = length(opt_res.logger.fmedian))
+    stats = Optimization.OptimizationStats(;
+        iterations = length(opt_res.logger.fmedian),
+        time = t1 - t0,
+        fevals = length(opt_res.logger.fmedian))
     SciMLBase.build_solution(cache, cache.opt,
         opt_res.logger.xbest[end],
         opt_res.logger.fbest[end]; original = opt_res,
diff --git a/lib/OptimizationCMAEvolutionStrategy/test/runtests.jl b/lib/OptimizationCMAEvolutionStrategy/test/runtests.jl
index 3515e8ab9..525772a87 100644
--- a/lib/OptimizationCMAEvolutionStrategy/test/runtests.jl
+++ b/lib/OptimizationCMAEvolutionStrategy/test/runtests.jl
@@ -12,7 +12,7 @@ using Test
     @test 10 * sol.objective < l1

     function cb(state, args...)
-        if state.iteration %10 == 0
+        if state.iteration % 10 == 0
             println(state.u)
         end
         return false
diff --git a/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl b/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl
index 070ad11a2..283d8ac96 100644
--- a/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl
+++ b/lib/OptimizationEvolutionary/src/OptimizationEvolutionary.jl
@@ -85,7 +85,8 @@ function SciMLBase.__solve(cache::OptimizationCache{

     function _cb(trace)
         curr_u = decompose_trace(trace).metadata["x"][end]
-        opt_state = Optimization.OptimizationState(; iteration = decompose_trace(trace).iteration,
+        opt_state = Optimization.OptimizationState(;
+            iteration = decompose_trace(trace).iteration,
             u = curr_u,
             objective = x[1],
             solver_state = trace)
@@ -132,8 +133,8 @@ function SciMLBase.__solve(cache::OptimizationCache{
     end
     t1 = time()
     opt_ret = Symbol(Evolutionary.converged(opt_res))
-    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations
-    , time = t1 - t0, fevals = opt_res.f_calls)
+    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
+        time = t1 - t0, fevals = opt_res.f_calls)
     SciMLBase.build_solution(cache, cache.opt,
         Evolutionary.minimizer(opt_res),
         Evolutionary.minimum(opt_res); original = opt_res,
diff --git a/lib/OptimizationEvolutionary/test/runtests.jl b/lib/OptimizationEvolutionary/test/runtests.jl
index 46f00d0a6..e7cd3a97b 100644
--- a/lib/OptimizationEvolutionary/test/runtests.jl
+++ b/lib/OptimizationEvolutionary/test/runtests.jl
@@ -36,7 +36,7 @@ Random.seed!(1234)
     @test sol.objective < l1

     function cb(state, args...)
-        if state.iteration %10 == 0
+        if state.iteration % 10 == 0
             println(state.u)
         end
         return false
diff --git a/lib/OptimizationFlux/src/OptimizationFlux.jl b/lib/OptimizationFlux/src/OptimizationFlux.jl
index c9410567d..c5a68c985 100644
--- a/lib/OptimizationFlux/src/OptimizationFlux.jl
+++ b/lib/OptimizationFlux/src/OptimizationFlux.jl
@@ -102,7 +102,7 @@ function SciMLBase.__solve(cache::OptimizationCache{
     end

     t1 = time()
-    stats = Optimization.OptimizationStats(; iterations = maxiters,
+    stats = Optimization.OptimizationStats(; iterations = maxiters,
         time = t1 - t0, fevals = maxiters, gevals = maxiters)
     SciMLBase.build_solution(cache, opt, θ, x[1], stats = stats)
     # here should be build_solution to create the output message
diff --git a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
index 11be7b9f2..a55f08e8a 100644
--- a/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
+++ b/lib/OptimizationGCMAES/src/OptimizationGCMAES.jl
@@ -114,12 +114,12 @@ function SciMLBase.__solve(cache::OptimizationCache{
             cache.ub; opt_args...)
     end
     t1 = time()
-    stats = Optimization.OptimizationStats(; iterations = maxiters === nothing ? 0 : maxiters,
-        time = t1 - t0)
+    stats = Optimization.OptimizationStats(;
+        iterations = maxiters === nothing ? 0 : maxiters,
+        time = t1 - t0)
     SciMLBase.build_solution(cache, cache.opt, opt_xmin, opt_fmin;
         retcode = Symbol(Bool(opt_ret)),
-        stats = stats
-    )
+        stats = stats)
 end
 end
diff --git a/lib/OptimizationMOI/src/nlp.jl b/lib/OptimizationMOI/src/nlp.jl
index 504799f39..4ad2ed9af 100644
--- a/lib/OptimizationMOI/src/nlp.jl
+++ b/lib/OptimizationMOI/src/nlp.jl
@@ -218,7 +218,9 @@ function MOI.eval_objective(evaluator::MOIOptimizationNLPEvaluator, x)
     else
         l = evaluator.f(x, evaluator.p)
         evaluator.iteration += 1
-        state = Optimization.OptimizationState(iteration = evaluator.iteration, u = x, objective = l[1])
+        state = Optimization.OptimizationState(iteration = evaluator.iteration,
+            u = x,
+            objective = l[1])
         evaluator.callback(state, l)
         return l
     end
diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
index cbbe05af1..5b4fbe483 100644
--- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
+++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -133,8 +133,12 @@ function SciMLBase.__solve(cache::OptimizationCache{
     error("Use OptimizationFunction to pass the derivatives or automatically generate them with one of the autodiff backends")

     function _cb(trace)
-        θ = cache.opt isa Optim.NelderMead ? decompose_trace(trace).metadata["centroid"] : decompose_trace(trace).metadata["x"]
-        opt_state = Optimization.OptimizationState(iteration = trace.iteration, u = θ, objective = x[1], solver_state = trace)
+        θ = cache.opt isa Optim.NelderMead ? decompose_trace(trace).metadata["centroid"] :
+            decompose_trace(trace).metadata["x"]
+        opt_state = Optimization.OptimizationState(iteration = trace.iteration,
+            u = θ,
+            objective = x[1],
+            solver_state = trace)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
@@ -208,8 +212,9 @@ function SciMLBase.__solve(cache::OptimizationCache{
     opt_res = Optim.optimize(optim_f, cache.u0, cache.opt, opt_args)
     t1 = time()
     opt_ret = Symbol(Optim.converged(opt_res))
-    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
-        time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls, hevals = opt_res.h_calls)
+    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
+        time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls,
+        hevals = opt_res.h_calls)
     SciMLBase.build_solution(cache, cache.opt, opt_res.minimizer,
         cache.sense === Optimization.MaxSense ? -opt_res.minimum :
@@ -247,8 +252,13 @@ function SciMLBase.__solve(cache::OptimizationCache{
     cur, state = iterate(cache.data)

     function _cb(trace)
-        θ = !(cache.opt isa Optim.SAMIN) && cache.opt.method == Optim.NelderMead() ? decompose_trace(trace).metadata["centroid"] : decompose_trace(trace).metadata["x"]
-        opt_state = Optimization.OptimizationState(iteration = trace.iteration, u = θ, objective = x[1], solver_state = trace)
+        θ = !(cache.opt isa Optim.SAMIN) && cache.opt.method == Optim.NelderMead() ?
+            decompose_trace(trace).metadata["centroid"] :
+            decompose_trace(trace).metadata["x"]
+        opt_state = Optimization.OptimizationState(iteration = trace.iteration,
+            u = θ,
+            objective = x[1],
+            solver_state = trace)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
@@ -297,8 +307,9 @@ function SciMLBase.__solve(cache::OptimizationCache{
     opt_res = Optim.optimize(optim_f, cache.lb, cache.ub, cache.u0, cache.opt, opt_args)
     t1 = time()
     opt_ret = Symbol(Optim.converged(opt_res))
-    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
-        time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls, hevals = opt_res.h_calls)
+    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
+        time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls,
+        hevals = opt_res.h_calls)
     SciMLBase.build_solution(cache, cache.opt, opt_res.minimizer, opt_res.minimum;
         original = opt_res, retcode = opt_ret, stats = stats)
@@ -331,7 +342,10 @@ function SciMLBase.__solve(cache::OptimizationCache{
     cur, state = iterate(cache.data)

     function _cb(trace)
-        opt_state = Optimization.OptimizationState(iteration = trace.iteration, u = decompose_trace(trace).metadata["x"], objective = x[1], solver_state = trace)
+        opt_state = Optimization.OptimizationState(iteration = trace.iteration,
+            u = decompose_trace(trace).metadata["x"],
+            objective = x[1],
+            solver_state = trace)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process.")
@@ -412,8 +426,9 @@ function SciMLBase.__solve(cache::OptimizationCache{
     opt_res = Optim.optimize(optim_f, optim_fc, cache.u0, cache.opt, opt_args)
     t1 = time()
     opt_ret = Symbol(Optim.converged(opt_res))
-    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
-        time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls, hevals = opt_res.h_calls)
+    stats = Optimization.OptimizationStats(; iterations = opt_res.iterations,
+        time = t1 - t0, fevals = opt_res.f_calls, gevals = opt_res.g_calls,
+        hevals = opt_res.h_calls)
     SciMLBase.build_solution(cache, cache.opt, opt_res.minimizer, opt_res.minimum;
         original = opt_res, retcode = opt_ret,
diff --git a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
index aff5ff625..d34299b27 100644
--- a/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
+++ b/lib/OptimizationOptimisers/src/OptimizationOptimisers.jl
@@ -67,7 +67,11 @@ function SciMLBase.__solve(cache::OptimizationCache{
     for (i, d) in enumerate(data)
         cache.f.grad(G, θ, d...)
         x = cache.f(θ, cache.p, d...)
-        opt_state = Optimization.OptimizationState(iteration = i, u = θ, objective = x[1], gradient = G, solver_state = state)
+        opt_state = Optimization.OptimizationState(iteration = i,
+            u = θ,
+            objective = x[1],
+            gradient = G,
+            solver_state = state)
         cb_call = cache.callback(opt_state, x...)
         if !(cb_call isa Bool)
             error("The callback should return a boolean `halt` for whether to stop the optimization process. Please see the `solve` documentation for information.")
@@ -88,7 +92,11 @@ function SciMLBase.__solve(cache::OptimizationCache{
         x = min_err
         θ = min_θ
         cache.f.grad(G, θ, d...)
-        opt_state = Optimization.OptimizationState(iteration = i, u = θ, objective = x[1], gradient = G, solver_state = state)
+        opt_state = Optimization.OptimizationState(iteration = i,
+            u = θ,
+            objective = x[1],
+            gradient = G,
+            solver_state = state)
         cache.callback(opt_state, x...)
         break
     end
diff --git a/lib/OptimizationOptimisers/test/runtests.jl b/lib/OptimizationOptimisers/test/runtests.jl
index 295c15f8f..bb91c07d5 100644
--- a/lib/OptimizationOptimisers/test/runtests.jl
+++ b/lib/OptimizationOptimisers/test/runtests.jl
@@ -61,11 +61,15 @@ using Zygote
         prob = OptimizationProblem(optprob, x0, _p)

        function callback(state, l)
-            Optimisers.adjust!(state.solver_state, 0.1/state.iteration)
+            Optimisers.adjust!(state.solver_state, 0.1 / state.iteration)
             return false
         end
-        sol = solve(prob, Optimisers.Adam(0.1), maxiters = 1000, progress = false, callback = callback)
+        sol = solve(prob,
+            Optimisers.Adam(0.1),
+            maxiters = 1000,
+            progress = false,
+            callback = callback)
     end
-    @test_throws ArgumentError sol = solve(prob, Optimisers.Adam())
+    @test_throws ArgumentError sol=solve(prob, Optimisers.Adam())
 end
diff --git a/src/stats_state.jl b/src/stats_state.jl
index 1706a5a2c..775e78c28 100644
--- a/src/stats_state.jl
+++ b/src/stats_state.jl
@@ -7,8 +7,9 @@ struct OptimizationStats
     hevals::Int
 end

-OptimizationStats(; iterations = 0, time = 0.0, fevals = 0, gevals = 0, hevals = 0) =
+function OptimizationStats(; iterations = 0, time = 0.0, fevals = 0, gevals = 0, hevals = 0)
     OptimizationStats(iterations, time, fevals, gevals, hevals)
+end

 struct OptimizationState{X, O, G, H, S}
     iteration::Int
@@ -19,6 +20,7 @@ struct OptimizationState{X, O, G, H, S}
     solver_state::S
 end

-OptimizationState(; iteration = 0, u = nothing, objective = nothing,
-    gradient = nothing, hessian = nothing, solver_state = nothing) =
+function OptimizationState(; iteration = 0, u = nothing, objective = nothing,
+        gradient = nothing, hessian = nothing, solver_state = nothing)
     OptimizationState(iteration, u, objective, gradient, hessian, solver_state)
+end
diff --git a/test/diffeqfluxtests.jl b/test/diffeqfluxtests.jl
index ace9984e9..f45514536 100644
--- a/test/diffeqfluxtests.jl
+++ b/test/diffeqfluxtests.jl
@@ -35,7 +35,6 @@ end
 iter = 0
 callback = function (state, l, pred)
-    display(l)

     # using `remake` to re-create our `prob` with current parameters `p`