Refactor ActiveSetQuadratic and SymmetricXXX (#524)
* ActiveSetQuadratic -> ActiveSetQuadraticCachedProducts

* SymmetricLMO -> SubspaceLMO

* reduce -> deflate

* SymmetricArray -> SubspaceVector

* Cap length of test strings to 50 characters

* Suppress verbosity

* Fix docstrings for Subspace LMO & Vector

* ActiveSetQuadraticCachedProducts -> ActiveSetQuadraticProductCaching
sebastiendesignolle authored Nov 3, 2024
1 parent c254be9 commit eb7d4c0
Showing 35 changed files with 271 additions and 261 deletions.
4 changes: 2 additions & 2 deletions docs/src/advanced.md
@@ -53,7 +53,7 @@ See [Extra-lazification](@ref) for a complete example.

## Specialized active set for quadratic functions

If the objective function is quadratic, a considerable speedup can be obtained by using the structure `ActiveSetQuadratic`.
If the objective function is quadratic, a considerable speedup can be obtained by using the structure `ActiveSetQuadraticProductCaching`.
It relies on the storage of various scalar products to efficiently determine the best (and worst for `blended_pairwise_conditional_gradient`) atom in the active set, without the need to compute many scalar products at each iteration.
The user should provide the Hessian matrix `A` as well as the linear part `b` of the function, such that:
```math
@@ -228,6 +228,6 @@ This subspace is the image of the Reynolds operator defined by
\mathcal{R}(x)=\frac{1}{|G|}\sum_{g\in G}g\cdot x.
```

In practice, the type `SymmetricLMO` allows the user to provide the Reynolds operator $\mathcal{R}$ as well as its adjoint $\mathcal{R}^\ast$.
In practice, the type `SubspaceLMO` allows the user to provide the Reynolds operator $\mathcal{R}$ as well as its adjoint $\mathcal{R}^\ast$.
The gradient is symmetrised with $\mathcal{R}^\ast$, then passed to the non-symmetric LMO, and the resulting output is symmetrised with $\mathcal{R}$.
In many cases, the gradient is already symmetric so that `reynolds_adjoint(gradient, lmo) = gradient` is a fast and valid choice.
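
As a concrete illustration of the quadratic active set described at the top of this page, here is a minimal sketch of its construction for a squared-distance objective; the probability-simplex LMO, the target point `xp`, and the dimension `n` are illustrative assumptions, not part of this commit.

```julia
using FrankWolfe, LinearAlgebra

n = 100
xp = rand(n)                                      # assumed target point
f(x) = dot(x, x) - 2 * dot(xp, x) + dot(xp, xp)   # f(x) = ||x - xp||^2
grad!(storage, x) = (storage .= 2 .* x .- 2 .* xp)

lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
x0 = FrankWolfe.compute_extreme_point(lmo, -xp)

# Hessian A = 2I and linear part b = -2xp, matching the gradient 2x - 2xp above
active_set = FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, collect(x0))], 2I, -2xp)

x, v, primal, dual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
    f, grad!, lmo, active_set;
    lazy=true,
    max_iteration=1_000,
)
```

The cached scalar products between the atoms, `A`, and `b` are what allow the best and worst atoms to be retrieved cheaply at each iteration.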
2 changes: 1 addition & 1 deletion examples/birkhoff_polytope.jl
@@ -54,7 +54,7 @@ FrankWolfe.benchmark_oracles(
x -> cf(x, xp, normxp2),
(str, x) -> cgrad!(str, x, xp),
lmo,
FrankWolfe.ActiveSetQuadratic([(1.0, collect(x0))], 2I/n^2, -2xp/n^2); # surprisingly faster and more memory efficient with collect
FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, collect(x0))], 2I/n^2, -2xp/n^2); # surprisingly faster and more memory efficient with collect
max_iteration=k,
line_search=FrankWolfe.Shortstep(2/n^2),
lazy=true,
32 changes: 16 additions & 16 deletions examples/birkhoff_polytope_symmetric.jl
@@ -36,7 +36,7 @@ x0 = FrankWolfe.compute_extreme_point(lmo_nat, randn(n, n))
x -> cf(x, xp, normxp2),
(str, x) -> cgrad!(str, x, xp),
lmo_nat,
FrankWolfe.ActiveSetQuadratic([(1.0, x0)], 2I/n^2, -2xp/n^2);
FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, x0)], 2I/n^2, -2xp/n^2);
max_iteration=k,
line_search=FrankWolfe.Shortstep(2/n^2),
lazy=true,
@@ -48,15 +48,15 @@ x0 = FrankWolfe.compute_extreme_point(lmo_nat, randn(n, n))
# here the problem is invariant under mirror symmetry around the diagonal and the anti-diagonal
# each solution of the LMO can then be added to the active set together with its orbit
# on top of that, the effective dimension of the space is reduced
# the following function constructs the functions `reduce` and `inflate` needed for SymmetricLMO
# `reduce` maps a matrix to the invariant vector space
# the following function constructs the functions `deflate` and `inflate` needed for SubspaceLMO
# `deflate` maps a matrix to the invariant vector space
# `inflate` maps a vector in this space back to a matrix
# using `FrankWolfe.SymmetricArray` is a convenience to avoid reallocating the result of `inflate`
function build_reduce_inflate(p::Matrix{T}) where {T <: Number}
# using `FrankWolfe.SubspaceVector` is a convenience to avoid reallocating the result of `inflate`
function build_deflate_inflate(p::Matrix{T}) where {T <: Number}
n = size(p, 1)
@assert n == size(p, 2) # square matrix
dimension = floor(Int, (n+1)^2 / 4) # reduced dimension
function reduce(A::AbstractMatrix{T}, lmo)
dimension = floor(Int, (n+1)^2 / 4) # deflated dimension
function deflate(A::AbstractMatrix{T}, lmo)
vec = Vector{T}(undef, dimension)
cnt = 0
@inbounds for i in 1:(n+1)÷2, j in i:n+1-i
@@ -75,9 +75,9 @@ function build_reduce_inflate(p::Matrix{T}) where {T <: Number}
end
end
end
return FrankWolfe.SymmetricArray(A, vec)
return FrankWolfe.SubspaceVector(A, vec)
end
function inflate(x::FrankWolfe.SymmetricArray, lmo)
function inflate(x::FrankWolfe.SubspaceVector, lmo)
cnt = 0
@inbounds for i in 1:(n+1)÷2, j in i:n+1-i
cnt += 1
@@ -102,22 +102,22 @@ function build_reduce_inflate(p::Matrix{T}) where {T <: Number}
end
return x.data
end
return reduce, inflate
return deflate, inflate
end

reduce, inflate = build_reduce_inflate(xpi)
const rxp = reduce(xpi, nothing)
@assert dot(rxp, rxp) ≈ normxp2 # should be correct thanks to the factors sqrt(2) and 2 in reduce and inflate
deflate, inflate = build_deflate_inflate(xpi)
const rxp = deflate(xpi, nothing)
@assert dot(rxp, rxp) ≈ normxp2 # should be correct thanks to the factors sqrt(2) and 2 in deflate and inflate

lmo_sym = FrankWolfe.SymmetricLMO(lmo_nat, reduce, inflate)
lmo_sym = FrankWolfe.SubspaceLMO(lmo_nat, deflate, inflate)

rx0 = FrankWolfe.compute_extreme_point(lmo_sym, reduce(sparse(randn(n, n)), nothing))
rx0 = FrankWolfe.compute_extreme_point(lmo_sym, deflate(sparse(randn(n, n)), nothing))

@time rx, rv, rprimal, rdual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
x -> cf(x, rxp, normxp2),
(str, x) -> cgrad!(str, x, rxp),
lmo_sym,
FrankWolfe.ActiveSetQuadratic([(1.0, rx0)], 2I/n^2, -2rxp/n^2);
FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, rx0)], 2I/n^2, -2rxp/n^2);
max_iteration=k,
line_search=FrankWolfe.Shortstep(2/n^2),
lazy=true,
4 changes: 2 additions & 2 deletions examples/blended_pairwise_with_direct.jl
@@ -63,7 +63,7 @@ trajectoryBPCG_standard = []

# Just projection quadratic
trajectoryBPCG_quadratic = []
as_quad = FrankWolfe.ActiveSetQuadratic([(1.0, copy(x00))], 2 * LinearAlgebra.I, -2xp)
as_quad = FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, copy(x00))], 2 * LinearAlgebra.I, -2xp)
@time x, v, primal, dual_gap, _ = FrankWolfe.blended_pairwise_conditional_gradient(
f,
grad!,
@@ -75,7 +75,7 @@ as_quad = FrankWolfe.ActiveSetQuadratic([(1.0, copy(x00))], 2 * LinearAlgebra.I,
callback=build_callback(trajectoryBPCG_quadratic),
);

as_quad = FrankWolfe.ActiveSetQuadratic([(1.0, copy(x00))], 2 * LinearAlgebra.I, -2xp)
as_quad = FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, copy(x00))], 2 * LinearAlgebra.I, -2xp)

# with quadratic active set
trajectoryBPCG_quadratic_as = []
52 changes: 26 additions & 26 deletions examples/docs_12_quadratic_symmetric.jl
@@ -1,7 +1,7 @@
# # Accelerations for quadratic functions and symmetric problems

# This example illustrates how to exploit symmetry to reduce the dimension of the problem via `SymmetricLMO`.
# Moreover, active set based algorithms can be accelerated by using the specialized structure `ActiveSetQuadratic`.
# This example illustrates how to exploit symmetry to reduce the dimension of the problem via `SubspaceLMO`.
# Moreover, active set based algorithms can be accelerated by using the specialized structure `ActiveSetQuadraticProductCaching`.

# The specific problem we consider here comes from quantum information and some context can be found [here](https://arxiv.org/abs/2302.04721).
# Formally, we want to find the distance between a tensor of size `m^N` and the `N`-partite local polytope which is defined by its vertices
@@ -124,11 +124,11 @@ println() #hide

# A first acceleration can be obtained by using the active set specialized for the quadratic objective function,
# whose gradient is here ``x-p``, explaining the hessian and linear part provided as arguments.
# The speedup is obtained by pre-computing some scalar products to quickly obtained, in each iteration, the best and worst
# atoms currently in the active set.
# The speedup is obtained by pre-computing some scalar products to quickly obtain, in each iteration,
# the best and worst atoms currently in the active set.

FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_naive = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p)
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_naive = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], LinearAlgebra.I, -p)
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_naive, asq_naive; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide

@@ -165,9 +165,9 @@ println() #hide
# a very small speedup by precomputing and storing `Combinatorics.permutations(1:N)`
# in a dedicated field of our custom LMO.

lmo_permutedims = FrankWolfe.SymmetricLMO(lmo_naive, reynolds_permutedims)
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_permutedims, FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_permutedims = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p)
lmo_permutedims = FrankWolfe.SubspaceLMO(lmo_naive, reynolds_permutedims)
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_permutedims, FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_permutedims = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], LinearAlgebra.I, -p)
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_permutedims, asq_permutedims; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide

@@ -197,9 +197,9 @@ function build_reynolds_unique(p::Array{T, N}) where {T <: Number, N}
end
end

lmo_unique = FrankWolfe.SymmetricLMO(lmo_naive, build_reynolds_unique(p))
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_unique, FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_unique = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], LinearAlgebra.I, -p)
lmo_unique = FrankWolfe.SubspaceLMO(lmo_naive, build_reynolds_unique(p))
FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_unique, FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], LinearAlgebra.I, -p); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_unique = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], LinearAlgebra.I, -p)
@time FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo_unique, asq_unique; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide

@@ -214,7 +214,7 @@ println() #hide
# accelerations, especially for active set based algorithms in the regime where many lazy iterations are performed.
# We refer to the example `symmetric.jl` for a small benchmark with symmetric matrices.

function build_reduce_inflate(p::Array{T, N}) where {T <: Number, N}
function build_deflate_inflate(p::Array{T, N}) where {T <: Number, N}
ptol = round.(p; digits=8)
ptol[ptol .== zero(T)] .= zero(T) # transform -0.0 into 0.0 as isequal(0.0, -0.0) is false
uniquetol = unique(ptol[:])
@@ -227,25 +227,25 @@ function build_reduce_inflate(p::Array{T, N}) where {T <: Number, N}
for (i, ind) in enumerate(indices)
vec[i] = sum(A[ind]) / sqmul[i]
end
return FrankWolfe.SymmetricArray(A, vec)
end, function(x::FrankWolfe.SymmetricArray, lmo)
return FrankWolfe.SubspaceVector(A, vec)
end, function(x::FrankWolfe.SubspaceVector, lmo)
for (i, ind) in enumerate(indices)
@view(x.data[ind]) .= x.vec[i] / sqmul[i]
end
return x.data
end
end

reduce, inflate = build_reduce_inflate(p)
p_reduce = reduce(p, nothing)
x0_reduce = reduce(x0, nothing)
f_reduce = let p_reduce = p_reduce, normp2 = normp2
x -> LinearAlgebra.dot(x, x) / 2 - LinearAlgebra.dot(p_reduce, x) + normp2
deflate, inflate = build_deflate_inflate(p)
p_deflate = deflate(p, nothing)
x0_deflate = deflate(x0, nothing)
f_deflate = let p_deflate = p_deflate, normp2 = normp2
x -> LinearAlgebra.dot(x, x) / 2 - LinearAlgebra.dot(p_deflate, x) + normp2
end
grad_reduce! = let p_reduce = p_reduce
grad_deflate! = let p_deflate = p_deflate
(storage, x) -> begin
@inbounds for i in eachindex(x)
storage[i] = x[i] - p_reduce[i]
storage[i] = x[i] - p_deflate[i]
end
end
end
@@ -255,9 +255,9 @@ println() #hide
# In this simple example, their shape remains unchanged, but in general this may need some
# reformulation, which falls to the user.

lmo_reduce = FrankWolfe.SymmetricLMO(lmo_naive, reduce, inflate)
FrankWolfe.blended_pairwise_conditional_gradient(f_reduce, grad_reduce!, lmo_reduce, FrankWolfe.ActiveSetQuadratic([(one(T), x0_reduce)], LinearAlgebra.I, -p_reduce); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_reduce = FrankWolfe.ActiveSetQuadratic([(one(T), x0_reduce)], LinearAlgebra.I, -p_reduce)
@time FrankWolfe.blended_pairwise_conditional_gradient(f_reduce, grad_reduce!, lmo_reduce, asq_reduce; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
lmo_deflate = FrankWolfe.SubspaceLMO(lmo_naive, deflate, inflate)
FrankWolfe.blended_pairwise_conditional_gradient(f_deflate, grad_deflate!, lmo_deflate, FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0_deflate)], LinearAlgebra.I, -p_deflate); verbose=false, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration=10) #hide
asq_deflate = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0_deflate)], LinearAlgebra.I, -p_deflate)
@time FrankWolfe.blended_pairwise_conditional_gradient(f_deflate, grad_deflate!, lmo_deflate, asq_deflate; verbose, lazy=true, line_search=FrankWolfe.Shortstep(one(T)), max_iteration)
println() #hide
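
As a quick sanity check on a deflate/inflate pair like the one built above, the deflated point should preserve scalar products of invariant tensors and round-trip back to the original; this mirrors the assertion used in `examples/birkhoff_polytope_symmetric.jl` and reuses `build_deflate_inflate` and `p` from this example (it is not part of the diff).

```julia
import LinearAlgebra

deflate, inflate = build_deflate_inflate(p)
rp = deflate(p, nothing)

# The sqrt-of-multiplicity scaling makes deflation norm-preserving on invariant tensors
@assert LinearAlgebra.dot(rp, rp) ≈ LinearAlgebra.dot(p, p)

# Inflating the deflated invariant point recovers it (up to the rounding used to group entries)
@assert inflate(rp, nothing) ≈ p
```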

2 changes: 1 addition & 1 deletion examples/quadratic.jl
@@ -74,7 +74,7 @@ function benchmark_Bell(p::Array{T, 2}, quadratic::Bool; fw_method=FrankWolfe.bl
lmo = BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1)))
x0 = FrankWolfe.compute_extreme_point(lmo, -p)
if quadratic
active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], I, -p)
active_set = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], I, -p)
else
active_set = FrankWolfe.ActiveSet([(one(T), x0)])
end
2 changes: 1 addition & 1 deletion examples/quadratic_A.jl
@@ -48,7 +48,7 @@ x0 = FrankWolfe.compute_extreme_point(lmo, zeros(T, n+1))
# active_set = FrankWolfe.ActiveSet([(1.0, x0)])

# specialized active set, automatically detecting the parameters A and b of the quadratic function f
active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], gradf)
active_set = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], gradf)

@time res = FrankWolfe.blended_pairwise_conditional_gradient(
# @time res = FrankWolfe.away_frank_wolfe(
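
The two-argument constructor used above infers the Hessian `A` and linear term `b` by probing the gradient. A minimal sketch, assuming FrankWolfe's usual in-place gradient convention and a probability-simplex LMO (both assumptions for illustration, not taken from this file):

```julia
using FrankWolfe

n = 10
p = rand(n)
grad!(storage, x) = (storage .= 2 .* (x .- p))   # gradient of ||x - p||^2, i.e. A = 2I, b = -2p

lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
x0 = FrankWolfe.compute_extreme_point(lmo, -p)

# A and b are detected automatically from grad!, no need to pass them explicitly
active_set = FrankWolfe.ActiveSetQuadraticProductCaching([(1.0, collect(x0))], grad!)
```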
4 changes: 2 additions & 2 deletions examples/reynolds.jl
@@ -86,12 +86,12 @@ function benchmark_Bell(p::Array{T, 3}, sym::Bool; kwargs...) where {T <: Number
end
lmo = BellCorrelationsLMO{T}(size(p, 1), zeros(T, size(p, 1)))
if sym
lmo = FrankWolfe.SymmetricLMO(lmo, reynolds_permutedims, reynolds_adjoint)
lmo = FrankWolfe.SubspaceLMO(lmo, reynolds_permutedims, reynolds_adjoint)
end
x0 = FrankWolfe.compute_extreme_point(lmo, -p)
println("Output type of the LMO: ", typeof(x0))
active_set = FrankWolfe.ActiveSet([(one(T), x0)])
# active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], I, -p)
# active_set = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], I, -p)
return FrankWolfe.blended_pairwise_conditional_gradient(f, grad!, lmo, active_set; lazy=true, line_search=FrankWolfe.Shortstep(one(T)), kwargs...)
end

14 changes: 7 additions & 7 deletions examples/symmetric.jl
@@ -57,7 +57,7 @@ function correlation_tensor_GHZ_polygon(N::Int, m::Int; type=Float64)
return res
end

function build_reduce_inflate_permutedims(p::Array{T, 2}) where {T <: Number}
function build_deflate_inflate_permutedims(p::Array{T, 2}) where {T <: Number}
n = size(p, 1)
@assert n == size(p, 2)
dimension = (n * (n + 1)) ÷ 2
@@ -72,8 +72,8 @@ function build_reduce_inflate_permutedims(p::Array{T, 2}) where {T <: Number}
vec[cnt+j] = (A[i, j] + A[j, i]) / sqrt2
end
end
return FrankWolfe.SymmetricArray(A, vec)
end, function(x::FrankWolfe.SymmetricArray, lmo)
return FrankWolfe.SubspaceVector(A, vec)
end, function(x::FrankWolfe.SubspaceVector, lmo)
cnt = 0
@inbounds for i in 1:n
x.data[i, i] = x.vec[i]
@@ -90,9 +90,9 @@ end
function benchmark_Bell(p::Array{T, 2}, sym::Bool; fw_method=FrankWolfe.blended_pairwise_conditional_gradient, kwargs...) where {T <: Number}
Random.seed!(0)
if sym
reduce, inflate = build_reduce_inflate_permutedims(p)
lmo = FrankWolfe.SymmetricLMO(BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1))), reduce, inflate)
p = reduce(p, lmo)
deflate, inflate = build_deflate_inflate_permutedims(p)
lmo = FrankWolfe.SubspaceLMO(BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1))), deflate, inflate)
p = deflate(p, lmo)
else
lmo = BellCorrelationsLMOHeuristic{T}(size(p, 1), zeros(T, size(p, 1)))
end
@@ -109,7 +109,7 @@ function benchmark_Bell(p::Array{T, 2}, sym::Bool; fw_method=FrankWolfe.blended_
end
end
x0 = FrankWolfe.compute_extreme_point(lmo, -p)
active_set = FrankWolfe.ActiveSetQuadratic([(one(T), x0)], I, -p)
active_set = FrankWolfe.ActiveSetQuadraticProductCaching([(one(T), x0)], I, -p)
res = fw_method(f, grad!, lmo, active_set; line_search=FrankWolfe.Shortstep(one(T)), lazy=true, verbose=false, max_iteration=10^2)
return fw_method(f, grad!, lmo, res[6]; line_search=FrankWolfe.Shortstep(one(T)), lazy=true, lazy_tolerance=10^6, kwargs...)
end
25 changes: 15 additions & 10 deletions src/abstract_oracles.jl
@@ -258,23 +258,28 @@ function compute_extreme_point(
end

"""
SymmetricLMO{LMO, TR, TI}
SubspaceLMO{LMO, TD, TI}
Symmetric LMO for the reduction operator defined by `TR`
Subspace LMO for the deflation operator defined by `TD`
and the inflation operator defined by `TI`.
Computations are performed in the reduced subspace, and the
Computations are performed in the deflated subspace, and the
effective call of the LMO first inflates the gradient, then
use the non-symmetric LMO, and finally reduces the output.
uses the full LMO, and finally deflates the output.
Deflation operators typically project onto symmetric subspaces
or select relevant elements of a full tensor.
Scalar products should be treated carefully to ensure correctness
of the results; see also the companion `SubspaceVector` structure.
"""
struct SymmetricLMO{LMO<:LinearMinimizationOracle,TR,TI} <: LinearMinimizationOracle
struct SubspaceLMO{LMO<:LinearMinimizationOracle,TD,TI} <: LinearMinimizationOracle
lmo::LMO
reduce::TR
deflate::TD
inflate::TI
function SymmetricLMO(lmo::LMO, reduce, inflate=(x, lmo) -> x) where {LMO<:LinearMinimizationOracle}
return new{typeof(lmo),typeof(reduce),typeof(inflate)}( lmo, reduce, inflate)
function SubspaceLMO(lmo::LMO, deflate, inflate=(x, lmo) -> x) where {LMO<:LinearMinimizationOracle}
return new{typeof(lmo),typeof(deflate),typeof(inflate)}(lmo, deflate, inflate)
end
end

function compute_extreme_point(sym::SymmetricLMO, direction; kwargs...)
return sym.reduce(compute_extreme_point(sym.lmo, sym.inflate(direction, sym.lmo)), sym.lmo)
function compute_extreme_point(sym::SubspaceLMO, direction; kwargs...)
return sym.deflate(compute_extreme_point(sym.lmo, sym.inflate(direction, sym.lmo)), sym.lmo)
end
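
A minimal usage sketch of the renamed oracle (illustrative, not part of the diff): a four-dimensional problem invariant under coordinate reversal, where the `sqrt(2)` scaling keeps `inflate` adjoint to `deflate` and preserves scalar products, as the docstring advises.

```julia
using FrankWolfe

# Invariant subspace under x -> reverse(x); the sqrt(2) factors make inflate the adjoint of deflate
deflate(A, lmo) = [(A[1] + A[4]) / sqrt(2), (A[2] + A[3]) / sqrt(2)]
inflate(x, lmo) = [x[1], x[2], x[2], x[1]] ./ sqrt(2)

inner_lmo = FrankWolfe.ProbabilitySimplexOracle(1.0)
lmo = FrankWolfe.SubspaceLMO(inner_lmo, deflate, inflate)

d = deflate([-1.0, 0.0, 0.0, -1.0], nothing)   # a direction already living in the deflated space
v = FrankWolfe.compute_extreme_point(lmo, d)   # inflate -> inner LMO -> deflate, as defined above
@assert length(v) == 2                         # the returned atom is deflated as well
```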