Commit

formatter, bump version (#195)
matbesancon authored Jun 24, 2021
1 parent 75c74eb commit ec66a6c
Showing 8 changed files with 81 additions and 79 deletions.
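The changes below are almost entirely mechanical restyling: spaces after commas, trailing commas in multi-line calls, and reflowed argument lists. As a rough sketch of how such a repository-wide pass is usually produced, assuming the project relies on JuliaFormatter (the commit itself does not name the tool):

using JuliaFormatter  # assumption: JuliaFormatter is available in the active environment

# Rewrite every .jl file under the current directory in place;
# `format` returns true when the tree was already formatted.
was_formatted = format(".")
@show was_formatted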
2 changes: 1 addition & 1 deletion Project.toml
@@ -1,7 +1,7 @@
name = "FrankWolfe"
uuid = "f55ce6ea-fdc5-4628-88c5-0087fe54bd30"
authors = ["ZIB-IOL"]
version = "0.1.2"
version = "0.1.3"

[deps]
Arpack = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97"
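Once the 0.1.3 release above is registered, downstream users can request that exact patch release; a hypothetical sketch via the Pkg API (registration and release tagging are not part of this commit):

using Pkg

# Install the specific version declared in Project.toml above.
Pkg.add(name="FrankWolfe", version="0.1.3")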
15 changes: 8 additions & 7 deletions docs/make.jl
@@ -5,12 +5,13 @@ using LinearAlgebra
makedocs(
modules=[FrankWolfe],
sitename="FrankWolfe.jl",
- format=Documenter.HTML(prettyurls = get(ENV, "CI", nothing) == "true"),
- pages=["Home"=>"index.md","Examples"=>"examples2.md","References"=>"reference.md",
- "Index"=>"indexlist.md"],
+ format=Documenter.HTML(prettyurls=get(ENV, "CI", nothing) == "true"),
+ pages=[
+     "Home" => "index.md",
+     "Examples" => "examples2.md",
+     "References" => "reference.md",
+     "Index" => "indexlist.md",
+ ],
)

- deploydocs(
-     repo = "github.com/ZIB-IOL/FrankWolfe.jl.git",
-     push_preview = true,
- )
+ deploydocs(repo="github.com/ZIB-IOL/FrankWolfe.jl.git", push_preview=true)
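For context, a documentation build with the makedocs/deploydocs configuration above is typically run from the repository root; a minimal sketch, assuming the conventional Documenter layout with a dedicated docs/ project environment (not shown in this diff):

# Start Julia with the docs environment active, e.g. `julia --project=docs`,
# then run the build script. Locally this only builds the HTML pages;
# deploydocs pushes previews or releases only when the expected CI variables are set.
include("docs/make.jl")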
12 changes: 6 additions & 6 deletions examples/lazy_away_step_cg-sparsity.jl
@@ -92,8 +92,8 @@ x0 = deepcopy(x00)
emphasis=FrankWolfe.memory,
verbose=true,
trajectory=true,
- callback=callback
- );
+ callback=callback,
+ );


println("\n==> Lazy AFW.\n")
@@ -115,7 +115,7 @@ x0 = deepcopy(x00)
lazy=true,
K=1.5,
trajectory=true,
- callback=callback
+ callback=callback,
);


@@ -136,7 +136,7 @@ x0 = deepcopy(x00)
lazy=true,
K=2.0,
trajectory=true,
- callback=callback
+ callback=callback,
);


@@ -157,7 +157,7 @@ x0 = deepcopy(x00)
K=4.0,
lazy=true,
trajectory=true,
- callback=callback
+ callback=callback,
);

trajectoryAdaLoc10 = []
@@ -177,7 +177,7 @@ x0 = deepcopy(x00)
K=10.0,
verbose=true,
trajectory=true,
- callback=callback
+ callback=callback,
);


3 changes: 2 additions & 1 deletion examples/linear_regression.jl
@@ -70,7 +70,8 @@ params = rand(6) .- 1 # start params in (-1,0)
norm(FrankWolfe.compute_gradient(f_stoch_noisy, params))

# test that error at true parameters is lower than at randomly initialized ones
- @test FrankWolfe.compute_value(f_stoch_noisy, params) > FrankWolfe.compute_value(f_stoch_noisy, params_perfect)
+ @test FrankWolfe.compute_value(f_stoch_noisy, params) >
+       FrankWolfe.compute_value(f_stoch_noisy, params_perfect)

# Vanilla Stochastic Gradient Descent with reshuffling
for idx in 1:1000
20 changes: 10 additions & 10 deletions src/afw.jl
@@ -31,11 +31,11 @@ function away_frank_wolfe(
renorm_interval=1000,
callback=nothing,
timeout=Inf,
- print_callback=FrankWolfe.print_callback
+ print_callback=FrankWolfe.print_callback,
)
- # format string for output of the algorithm
- format_string = "%6s %13s %14e %14e %14e %14e %14e %14i\n"
-
+ # format string for output of the algorithm
+ format_string = "%6s %13s %14e %14e %14e %14e %14e %14i\n"
+
t = 0
dual_gap = Inf
@@ -74,7 +74,7 @@ function away_frank_wolfe(
end
headers =
("Type", "Iteration", "Primal", "Dual", "Dual Gap", "Time", "It/sec", "#ActiveSet")
- print_callback(headers,format_string,print_header=true)
+ print_callback(headers, format_string, print_header=true)
end

# likely not needed anymore as now the iterates are provided directly via the active set
@@ -114,7 +114,7 @@
end

#####################
-
+ # compute current iterate from active set
x = compute_active_set_iterate(active_set)
if isnothing(momentum)
@@ -202,7 +202,7 @@ function away_frank_wolfe(
t / tot_time,
length(active_set),
)
- print_callback(rep,format_string)
+ print_callback(rep, format_string)
flush(stdout)
end
t += 1
@@ -230,7 +230,7 @@
t / ((time_ns() - time_start) / 1.0e9),
length(active_set),
)
- print_callback(rep,format_string)
+ print_callback(rep, format_string)
flush(stdout)
end

@@ -253,8 +253,8 @@ function away_frank_wolfe(
t / ((time_ns() - time_start) / 1.0e9),
length(active_set),
)
- print_callback(rep,format_string)
- print_callback(nothing,format_string,print_footer=true)
+ print_callback(rep, format_string)
+ print_callback(nothing, format_string, print_footer=true)
flush(stdout)
end

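The format_string introduced above ("%6s %13s %14e %14e %14e %14e %14e %14i\n") describes one row of the verbose progress table emitted through print_callback. The exact rendering inside FrankWolfe.print_callback is not shown in this diff, but a rough, standalone illustration of what one such row looks like, using the standard Printf machinery and made-up values, is:

using Printf

# Hypothetical values for: type, iteration, primal, dual, dual gap, time, it/sec, active set size.
row_format = Printf.Format("%6s %13s %14e %14e %14e %14e %14e %14i\n")
print(Printf.format(row_format, "FW", "100", 1.0e-3, 9.0e-4, 1.0e-4, 0.52, 192.3, 7))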
44 changes: 22 additions & 22 deletions src/blended_cg.jl
@@ -34,12 +34,12 @@ function blended_conditional_gradient(
direction_storage=nothing,
callback=nothing,
timeout=Inf,
- print_callback=FrankWolfe.print_callback,
+ print_callback=FrankWolfe.print_callback,
lmo_kwargs...,
)
- # format string for output of the algorithm
- format_string = "%6s %13s %14e %14e %14e %14e %14e %14i %14i\n"
-
+ # format string for output of the algorithm
+ format_string = "%6s %13s %14e %14e %14e %14e %14e %14i %14i\n"
+
t = 0
primal = Inf
@@ -99,7 +99,7 @@ function blended_conditional_gradient(
"#ActiveSet",
"#non-simplex",
)
- print_callback(headers,format_string,print_header=true)
+ print_callback(headers, format_string, print_header=true)
end
if !isa(x, Union{Array,SparseVector})
x = convert(Array{float(eltype(x))}, x)
@@ -151,7 +151,7 @@
callback=callback,
timeout=timeout,
print_callback=print_callback,
- format_string=format_string
+ format_string=format_string,
)
t += num_simplex_descent_steps
#Take a FW step.
@@ -231,16 +231,16 @@
t / tot_time,
length(active_set),
non_simplex_iter,
- )
- print_callback(rep,format_string)
+ )
+ print_callback(rep, format_string)
flush(stdout)
end

t = t + 1
non_simplex_iter += 1

end

## post-processing and cleanup after loop

# report last iteration
@@ -262,7 +262,7 @@ function blended_conditional_gradient(
length(active_set),
non_simplex_iter,
)
- print_callback(rep,format_string)
+ print_callback(rep, format_string)
flush(stdout)
end

@@ -290,8 +290,8 @@
length(active_set),
non_simplex_iter,
)
- print_callback(rep,format_string)
- print_callback(nothing,format_string,print_footer=true)
+ print_callback(rep, format_string)
+ print_callback(nothing, format_string, print_footer=true)
flush(stdout)
end
return x, v, primal, dual_gap, traj_data
@@ -334,7 +334,7 @@ function minimize_over_convex_hull!(
callback,
timeout=Inf,
print_callback=nothing,
- format_string=nothing
+ format_string=nothing,
)
#No hessian is known, use simplex gradient descent.
if hessian === nothing
@@ -357,7 +357,7 @@
callback=callback,
timeout=timeout,
print_callback=print_callback,
- format_string=format_string
+ format_string=format_string,
)
else
x = compute_active_set_iterate(active_set)
@@ -415,7 +415,7 @@
callback=callback,
timeout=timeout,
print_callback=print_callback,
- format_string=format_string
+ format_string=format_string,
)
@. active_set.weights = new_weights
end
@@ -437,7 +437,7 @@
callback=callback,
timeout=timeout,
print_callback=print_callback,
- format_string=format_string
+ format_string=format_string,
)
@. active_set.weights = new_weights
end
@@ -583,7 +583,7 @@ function accelerated_simplex_gradient_descent_over_probability_simplex(
callback,
timeout=Inf,
print_callback=nothing,
- format_string=nothing
+ format_string=nothing,
)
number_of_steps = 0
x = deepcopy(initial_point)
@@ -646,7 +646,7 @@
length(initial_point),
non_simplex_iter,
)
- print_callback(rep,format_string)
+ print_callback(rep, format_string)
flush(stdout)
end
if timeout < Inf
@@ -683,7 +683,7 @@ function simplex_gradient_descent_over_probability_simplex(
callback,
timeout=Inf,
print_callback=nothing,
- format_string=nothing
+ format_string=nothing,
)
number_of_steps = 0
x = deepcopy(initial_point)
@@ -726,7 +726,7 @@
length(initial_point),
non_simplex_iter,
)
- print_callback(rep,format_string)
+ print_callback(rep, format_string)
flush(stdout)
end
if timeout < Inf
@@ -815,7 +815,7 @@ function simplex_gradient_descent_over_convex_hull(
callback,
timeout=Inf,
print_callback=nothing,
- format_string=nothing
+ format_string=nothing,
)
number_of_steps = 0
L_inner = nothing
@@ -961,7 +961,7 @@
length(active_set),
non_simplex_iter,
)
- print_callback(rep,format_string)
+ print_callback(rep, format_string)
flush(stdout)
end
if timeout < Inf