Skip to content

Commit

Permalink
added open loop parameterization (#428)
Browse files Browse the repository at this point in the history
* added open loop parameterization

* added test

* replace diagonal with sparse for 1.6 compat

* import

* dense matrix

* relax length
  • Loading branch information
matbesancon authored Sep 29, 2023
1 parent 4a4540b commit e5af3d6
Show file tree
Hide file tree
Showing 2 changed files with 107 additions and 7 deletions.
23 changes: 16 additions & 7 deletions src/linesearch.jl
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,22 @@ function perform_line_search end
build_linesearch_workspace(::LineSearchMethod, x, gradient) = nothing

"""
Computes step size: `l/(l + t)` at iteration `t`, given `l > 0`.

Using `l ≥ 4` is advised only for strongly convex sets, see:
> Acceleration of Frank-Wolfe Algorithms with Open-Loop Step-Sizes, Wirth, Kerdreux, Pokutta, 2023.
"""
struct Agnostic{T<:Real} <: LineSearchMethod
    # Open-loop parameter: the step size at iteration t is l / (l + t).
    # l = 2 recovers the classic agnostic 2/(t+2) rule.
    l::Int
end

# Default: the classic 2/(t+2) open-loop rule in Float64.
Agnostic() = Agnostic{Float64}(2)
# Custom open-loop parameter, Float64 arithmetic.
Agnostic(l::Int) = Agnostic{Float64}(l)
# Custom number type with the default parameter l = 2.
Agnostic{T}() where {T} = Agnostic{T}(2)

perform_line_search(
::Agnostic{<:Rational},
ls::Agnostic{<:Rational},
t,
f,
g!,
Expand All @@ -41,9 +49,10 @@ perform_line_search(
gamma_max,
workspace,
memory_mode::MemoryEmphasis,
) = 2 // (t + 2)
) = ls.l // (t + ls.l)

perform_line_search(
::Agnostic{T},
ls::Agnostic{T},
t,
f,
g!,
Expand All @@ -53,7 +62,7 @@ perform_line_search(
gamma_max,
workspace,
memory_mode::MemoryEmphasis,
) where {T} = T(2 / (t + 2))
) where {T} = T(ls.l / (t + ls.l))

Base.print(io::IO, ::Agnostic) = print(io, "Agnostic")

Expand Down
91 changes: 91 additions & 0 deletions test/trajectory_tests/open_loop_parametric.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
using FrankWolfe

using Test
using LinearAlgebra

@testset "Open-loop FW on polytope" begin
    n = Int(1e2)
    k = Int(1e4)

    # Quadratic objective centered at the all-ones vector.
    xp = ones(n)
    f(x) = norm(x - xp)^2
    function grad!(storage, x)
        @. storage = 2 * (x - xp)
    end

    lmo = FrankWolfe.KSparseLMO(40, 1.0)

    # Deterministic starting vertex (zero gradient), so the primal values
    # asserted below are reproducible.
    x0 = FrankWolfe.compute_extreme_point(lmo, zeros(n))

    res_2 = FrankWolfe.frank_wolfe(
        f,
        grad!,
        lmo,
        copy(x0),
        max_iteration=k,
        line_search=FrankWolfe.Agnostic(2),
        print_iter=k / 10,
        epsilon=1e-5,
        verbose=true,
        trajectory=true,
    )

    res_10 = FrankWolfe.frank_wolfe(
        f,
        grad!,
        lmo,
        copy(x0),
        max_iteration=k,
        line_search=FrankWolfe.Agnostic(10),
        print_iter=k / 10,
        epsilon=1e-5,
        verbose=true,
        trajectory=true,
    )

    # res[4] is the primal value; compare floats with ≈, never ==.
    @test res_2[4] ≈ 0.004799839951985518
    @test res_10[4] ≈ 0.02399919272834694

    # Strongly convex set: an ellipsoid with a random diagonal covariance.
    # NOTE(review): the RNG is not seeded, so only iteration-count bounds
    # (not exact values) are asserted below.
    xp2 = 10 * ones(n)
    diag_term = 100 * rand(n)
    covariance_matrix = zeros(n, n) + LinearAlgebra.Diagonal(diag_term)
    lmo2 = FrankWolfe.EllipsoidLMO(covariance_matrix)

    f2(x) = norm(x - xp2)^2
    function grad2!(storage, x)
        @. storage = 2 * (x - xp2)
    end

    x0 = FrankWolfe.compute_extreme_point(lmo2, randn(n))

    res_2 = FrankWolfe.frank_wolfe(
        f2,
        grad2!,
        lmo2,
        copy(x0),
        max_iteration=k,
        line_search=FrankWolfe.Agnostic(2),
        print_iter=k / 10,
        epsilon=1e-5,
        verbose=true,
        trajectory=true,
    )

    res_10 = FrankWolfe.frank_wolfe(
        f2,
        grad2!,
        lmo2,
        copy(x0),
        max_iteration=k,
        line_search=FrankWolfe.Agnostic(10),
        print_iter=k / 10,
        epsilon=1e-5,
        verbose=true,
        trajectory=true,
    )

    # res[end] is the trajectory: on strongly convex sets, larger open-loop
    # l converges in fewer iterations.
    @test length(res_10[end]) <= 8
    @test length(res_2[end]) <= 73

end

0 comments on commit e5af3d6

Please sign in to comment.