add optional multithreading vumps-leading boundary
lkdvos committed Oct 3, 2023
1 parent 11a9fca commit 7c057ac
Showing 1 changed file with 45 additions and 33 deletions.
78 changes: 45 additions & 33 deletions src/algorithms/statmech/vumps.jl
@@ -3,70 +3,82 @@
# - leading_boundary primarily works on MPSMultiline
# - they search for different eigenvalues
# - ham vumps should use Lanczos, this has to use Arnoldi (see the sketch directly below these notes)
# - this vumps updates entire columns (state[:,i]); incompatible with InfiniteMPS
# - this vumps updates entire columns (Ψ[:,i]); incompatible with InfiniteMPS
# - (a)c-prime takes a different number of arguments
# - there is very little duplicate code, but merging them would be a bit more convoluted (primarily because of the indexing)
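The Lanczos/Arnoldi note above is the main solver difference: the Hamiltonian eigenproblem is Hermitian and targets an extremal real eigenvalue, while the boundary problem handled here is generally non-Hermitian and targets the eigenvalue of largest magnitude. A minimal KrylovKit sketch of that distinction, with dense matrices standing in for the actual ∂∂AC/∂∂C maps:

    using KrylovKit, LinearAlgebra
    Hherm = Hermitian(randn(20, 20))                   # stand-in for a Hermitian effective Hamiltonian
    vals_h, _, _ = eigsolve(Hherm, randn(20), 1, :SR, Lanczos(; tol=1e-10))
    T = randn(ComplexF64, 20, 20)                      # stand-in for a non-Hermitian boundary map
    vals_b, _, _ = eigsolve(T, randn(ComplexF64, 20), 1, :LM, Arnoldi(; tol=1e-10))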

"
leading_boundary(state, opp, alg, envs=environments(state, opp))
leading_boundary(Ψ, opp, alg, envs=environments(Ψ, opp))
Approximate the leading eigenvector for opp.
"
function leading_boundary(state::InfiniteMPS, H, alg, envs=environments(state, H))
(st, pr, de) = leading_boundary(convert(MPSMultiline, state), Multiline([H]), alg, envs)
function leading_boundary(Ψ::InfiniteMPS, H, alg, envs=environments(Ψ, H))
(st, pr, de) = leading_boundary(convert(MPSMultiline, Ψ), Multiline([H]), alg, envs)
return convert(InfiniteMPS, st), pr, de
end
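For context, a minimal usage sketch of this entry point (not part of the diff): compute the boundary MPS of a 2D classical partition function. The classical_ising() transfer MPO from MPSKitModels and the exact VUMPS keyword names are assumptions and may differ between versions.

    using MPSKit, MPSKitModels, TensorKit
    O = classical_ising()                        # assumed constructor: row-to-row transfer MPO of the 2D Ising model
    Ψ₀ = InfiniteMPS([ℂ^2], [ℂ^20])              # random initial boundary MPS
    Ψ, envs, galerkin = leading_boundary(Ψ₀, O, VUMPS(; tol_galerkin=1e-10, maxiter=400))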

function leading_boundary(state::MPSMultiline, H, alg::VUMPS, envs=environments(state, H))
function leading_boundary(Ψ::MPSMultiline, H, alg::VUMPS, envs=environments(Ψ, H))
galerkin = 1 + alg.tol_galerkin
iter = 1

temp_ACs = similar.(state.AC)
temp_Cs = similar.(state.CR)
temp_ACs = similar.(Ψ.AC)
temp_Cs = similar.(Ψ.CR)

while true
eigalg = Arnoldi(; tol=alg.tol_galerkin / 10)

@sync for col in 1:size(state, 2)
Threads.@spawn begin
H_AC = ∂∂AC($col, $state, $H, $envs)

(vals_ac, vecs_ac) = eigsolve(
H_AC, RecursiveVec($state.AC[:, col]), 1, :LM, eigalg
)
$temp_ACs[:, col] = vecs_ac[1].vecs[:]
eigalg = Arnoldi(; tol=galerkin / (4 * sqrt(iter)))
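# note: the Krylov tolerance now tracks the current galerkin error and tightens as 1/sqrt(iter),
# so early iterations are not oversolved (the old line above used a fixed alg.tol_galerkin / 10)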

if Defaults.parallelize_sites
@sync begin
for col in 1:size(Ψ, 2)
Threads.@spawn begin
H_AC = ∂∂AC($col, $Ψ, $H, $envs)
ac = RecursiveVec($Ψ.AC[:, col])
_, acvecs = eigsolve(H_AC, ac, 1, :LM, eigalg)
$temp_ACs[:, col] = acvecs[1].vecs[:]
end

Threads.@spawn begin
H_C = ∂∂C($col, $Ψ, $H, $envs)
c = RecursiveVec($Ψ.CR[:, col])
_, cvecs = eigsolve(H_C, c, 1, :LM, eigalg)
$temp_Cs[:, col] = cvecs[1].vecs[:]
end
end
end

Threads.@spawn begin
H_C = ∂∂C($col, $state, $H, $envs)

(vals_c, vecs_c) = eigsolve(
H_C, RecursiveVec($state.CR[:, col]), 1, :LM, eigalg
)
$temp_Cs[:, col] = vecs_c[1].vecs[:]
else
for col in 1:size(Ψ, 2)
H_AC = ∂∂AC(col, Ψ, H, envs)
ac = RecursiveVec(Ψ.AC[:, col])
_, acvecs = eigsolve(H_AC, ac, 1, :LM, eigalg)
temp_ACs[:, col] = acvecs[1].vecs[:]

H_C = ∂∂C(col, Ψ, H, envs)
c = RecursiveVec(Ψ.CR[:, col])
_, cvecs = eigsolve(H_C, c, 1, :LM, eigalg)
temp_Cs[:, col] = cvecs[1].vecs[:]
end
end
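The parallel branch spawns one task per column for the AC solve and one for the C solve, and @sync waits for all of them before the gauge step; the $-interpolation copies the current bindings into each task. A stripped-down sketch of that pattern (not from this file), which only runs in parallel when Julia is started with more than one thread:

    results = Vector{Float64}(undef, 8)
    @sync for col in 1:8
        Threads.@spawn begin
            $results[$col] = sum(abs2, randn(100))   # stand-in for the per-column eigensolve
        end
    end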

for row in 1:size(state, 1), col in 1:size(state, 2)
for row in 1:size(Ψ, 1), col in 1:size(Ψ, 2)
QAc, _ = leftorth!(temp_ACs[row, col]; alg=TensorKit.QRpos())
Qc, _ = leftorth!(temp_Cs[row, col]; alg=TensorKit.QRpos())
temp_ACs[row, col] = QAc * adjoint(Qc)
end
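The loop above is the usual VUMPS gauge step: from the updated AC and C it builds the left-isometric tensor that best satisfies AC ≈ AL · C, using the orthogonal factors of both. On plain matrices the same construction reads as follows (illustrative only; TensorKit.QRpos additionally fixes the sign/phase freedom that a plain qr leaves open):

    using LinearAlgebra
    D, d = 3, 2
    AC = randn(D * d, D)            # updated center tensor, grouped as a (D*d) × D matrix
    C  = randn(D, D)                # updated bond matrix
    QAc = Matrix(qr(AC).Q)          # thin orthogonal factor of AC
    Qc  = Matrix(qr(C).Q)
    AL  = QAc * Qc'                 # left isometry: AL' * AL ≈ I, and AL * C ≈ AC at convergence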

state = MPSMultiline(
temp_ACs, state.CR[:, end]; tol=alg.tol_gauge, maxiter=alg.orthmaxiter
Ψ = MPSMultiline(
temp_ACs, Ψ.CR[:, end]; tol=alg.tol_gauge, maxiter=alg.orthmaxiter
)
recalculate!(envs, state)
recalculate!(envs, Ψ)

(state, envs) =
alg.finalize(iter, state, H, envs)::Tuple{typeof(state),typeof(envs)}
(Ψ, envs) =
alg.finalize(iter, Ψ, H, envs)::Tuple{typeof(Ψ),typeof(envs)}
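# alg.finalize is a user-supplied per-iteration hook; the assertion guarantees it returns the state
# and environments with unchanged types, so the loop can keep using them directly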

galerkin = calc_galerkin(state, envs)
galerkin = calc_galerkin(Ψ, envs)
alg.verbose && @info "vumps @iteration $(iter) galerkin = $(galerkin)"

if (galerkin <= alg.tol_galerkin) || iter >= alg.maxiter
iter >= alg.maxiter && @warn "vumps didn't converge $(galerkin)"
return state, envs, galerkin
return Ψ, envs, galerkin
end

iter += 1
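A practical note on the new option: the threaded branch is only taken when Defaults.parallelize_sites is true (how that flag is set is not part of this diff), and Threads.@spawn only yields real parallelism when Julia is started with more than one thread, e.g. julia -t 4 or JULIA_NUM_THREADS=4. A quick check, assuming Defaults is the package's own defaults submodule:

    using MPSKit
    Threads.nthreads()                  # number of threads available to Threads.@spawn
    MPSKit.Defaults.parallelize_sites   # the flag read by the new branch (its definition is not shown here)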
