diff --git a/docs/Project.toml b/docs/Project.toml
index f81f79319..bfec1b56b 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -24,6 +24,7 @@ OptimizationNOMAD = "2cab0595-8222-4775-b714-9828e6a9e01b"
 OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
 OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1"
 OptimizationPRIMA = "72f8369c-a2ea-4298-9126-56167ce9cbc2"
+OptimizationPolyalgorithms = "500b13db-7e66-49ce-bda4-eed966be6282"
 OptimizationSpeedMapping = "3d669222-0d7d-4eb9-8a9f-d8528b0d9b91"
 OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
@@ -55,6 +56,7 @@ OptimizationNOMAD = "0.1"
 OptimizationOptimJL = "0.1"
 OptimizationOptimisers = "0.1"
 OptimizationPRIMA = "0.0.1"
+OptimizationPolyalgorithms = "0.1"
 OptimizationSpeedMapping = "0.1"
 OrdinaryDiffEq = "6"
 ReverseDiff = ">= 1.9.0"
diff --git a/docs/pages.jl b/docs/pages.jl
index d5ff2a4da..6eb954360 100644
--- a/docs/pages.jl
+++ b/docs/pages.jl
@@ -31,6 +31,7 @@ pages = ["index.md",
         "Optim.jl" => "optimization_packages/optim.md",
         "Optimisers.jl" => "optimization_packages/optimisers.md",
         "PRIMA.jl" => "optimization_packages/prima.md",
+        "Polyalgorithms.jl" => "optimization_packages/polyopt.md",
         "QuadDIRECT.jl" => "optimization_packages/quaddirect.md",
         "SpeedMapping.jl" => "optimization_packages/speedmapping.md",
     ],
diff --git a/docs/src/optimization_packages/polyopt.md b/docs/src/optimization_packages/polyopt.md
new file mode 100644
index 000000000..b7cc02bc3
--- /dev/null
+++ b/docs/src/optimization_packages/polyopt.md
@@ -0,0 +1,28 @@
+# OptimizationPolyalgorithms.jl
+
+OptimizationPolyalgorithms.jl is a package that collects polyalgorithms formed by combining popular optimization solvers with complementary characteristics.
+
+## Installation: OptimizationPolyalgorithms
+
+To use this package, install the OptimizationPolyalgorithms package:
+
+```julia
+import Pkg;
+Pkg.add("OptimizationPolyalgorithms");
+```
+## Algorithms
+
+Currently, the following polyalgorithms are supported:
+
+`PolyOpt`: runs Adam followed by BFGS for an equal number of iterations. This is useful in scientific machine learning, where the stochastic Adam phase explores the loss surface and BFGS then converges quickly to a nearby minimum.
+
+```@example polyopt
+using Optimization, OptimizationPolyalgorithms
+rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
+x0 = zeros(2)
+_p = [1.0, 100.0]
+
+optprob = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
+prob = OptimizationProblem(optprob, x0, _p)
+sol = Optimization.solve(prob, PolyOpt(), maxiters = 1000)
+```
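
As context for the `PolyOpt` entry added above: the sketch below spells out the same Adam-then-BFGS idea by hand using OptimizationOptimisers.jl and OptimizationOptimJL.jl directly. It is only an illustration of the polyalgorithm concept on the same Rosenbrock problem, not `PolyOpt`'s actual implementation; the specific solver constructors (`Optimisers.Adam()`, `BFGS()`) and the 50/50 iteration split are assumptions based on the description in the new docs page.

```julia
# Hand-written sketch of an Adam -> BFGS polyalgorithm (illustrative only;
# PolyOpt performs this chaining internally).
using Optimization, OptimizationOptimisers, OptimizationOptimJL

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
_p = [1.0, 100.0]

optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff())
prob = OptimizationProblem(optf, x0, _p)

maxiters = 1000

# Phase 1: explore the loss surface with the stochastic optimizer (Adam).
sol_adam = Optimization.solve(prob, Optimisers.Adam(), maxiters = maxiters ÷ 2)

# Phase 2: restart from Adam's final iterate and let BFGS converge to a nearby minimum.
prob_bfgs = OptimizationProblem(optf, sol_adam.u, _p)
sol = Optimization.solve(prob_bfgs, BFGS(), maxiters = maxiters ÷ 2)
```

The appeal of packaging this as a single `PolyOpt()` call is that users get the warm-started second phase without managing the intermediate solution and problem reconstruction themselves.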