diff --git a/README.md b/README.md
index 82505f7..ab0802c 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@
 function MPO_new(os::OpSum, sites::Vector{<:Index}; kwargs...)::MPO
 ```
 
 The optional keyword arguments are
-* `basisOpCacheVec`: A list of operators to use as a basis for each site. The operators on each site are expressed as one of these basis operators.
+* `basis_op_cache_vec`: A list of operators to use as a basis for each site. The operators on each site are expressed as one of these basis operators.
 * `tol::Real`: The tolerance used in the sparse QR decomposition (which is done by SPQR). The default value is the SPQR default, which is calculated separately for each QR decomposition. If you want an MPO that is accurate up to floating-point errors, the default tolerance should work well. If instead you want to compress the MPO, note that `tol` will differ from the `cutoff` passed to `ITensors.MPO`, since the truncation method is completely different. If you want to replicate the same truncation behavior, first construct the MPO with a suitably small (or default) `tol` and then use `ITensors.truncate!`.
 
 ## Examples: Fermi-Hubbard Hamiltonian in Real Space
diff --git a/examples/electronic-structure.jl b/examples/electronic-structure.jl
index e1667ba..b225be0 100644
--- a/examples/electronic-structure.jl
+++ b/examples/electronic-structure.jl
@@ -71,12 +71,12 @@ function electronic_structure(
     "Nup * Ndn",
   ]
 
-  opCacheVec = [
+  op_cache_vec = [
     [OpInfo(ITensors.Op(name, n), sites[n]) for name in operatorNames] for
     n in eachindex(sites)
   ]
 
-  return @time "\tConstrucing MPO" MPO_new(os, sites; basisOpCacheVec=opCacheVec)
+  return @time "\tConstructing MPO" MPO_new(os, sites; basis_op_cache_vec=op_cache_vec)
 end
 end
 
@@ -154,7 +154,7 @@ function electronic_structure_OpIDSum(N::Int, complexBasisFunctions::Bool)::MPO
   sites = siteinds("Electron", N; conserve_qns=true)
 
   return @time "\tConstructing MPO" MPO_new(
-    os, sites, operatorNames; basisOpCacheVec=operatorNames
+    os, sites, operatorNames; basis_op_cache_vec=operatorNames
   )
 end
diff --git a/examples/fermi-hubbard.jl b/examples/fermi-hubbard.jl
index 42d20b2..2c7e2f2 100644
--- a/examples/fermi-hubbard.jl
+++ b/examples/fermi-hubbard.jl
@@ -73,7 +73,7 @@ function Fermi_Hubbard_momentum_space(
     ] for _ in 1:N
   ]
 
-  return @time "\tConstructing MPO" MPO_new(os, sites; basisOpCacheVec=operatorNames)
+  return @time "\tConstructing MPO" MPO_new(os, sites; basis_op_cache_vec=operatorNames)
 end
 end
 
@@ -126,7 +126,7 @@ function Fermi_Hubbard_momentum_space_OpIDSum(N::Int, t::Real=1.0, U::Real=4.0):
   sites = siteinds("Electron", N; conserve_qns=true)
 
   return @time "\tConstructing MPO" MPO_new(
-    os, sites, operatorNames; basisOpCacheVec=operatorNames
+    os, sites, operatorNames; basis_op_cache_vec=operatorNames
   )
 end
diff --git a/src/MPOConstruction.jl b/src/MPOConstruction.jl
index 7954f24..edfebd7 100644
--- a/src/MPOConstruction.jl
+++ b/src/MPOConstruction.jl
@@ -12,16 +12,16 @@ BlockSparseMatrix{C} = Dict{Tuple{Int,Int},Matrix{C}}
   return sparse(ret.Q), ret.R, ret.prow, ret.pcol, rank(ret)
 end
 
-function for_non_zeros(f::Function, A::SparseMatrixCSC, maxRow::Int, maxCol::Int)::Nothing
-  @assert maxRow <= size(A, 1)
-  @assert maxCol <= size(A, 2)
+function for_non_zeros(f::Function, A::SparseMatrixCSC, max_row::Int, max_col::Int)::Nothing
+  @assert max_row <= size(A, 1)
+  @assert max_col <= size(A, 2)
 
   rows = rowvals(A)
   vals = nonzeros(A)
-  for col in 1:maxCol
+  for col in 1:max_col
     for idx in nzrange(A, col)
       row = rows[idx]
-      if row <= maxRow
+      if row <= max_row
        value = vals[idx]
         f(value, row, col)
       end
@@ -29,12 +29,12 @@ end
   end
 end
 
-function for_non_zeros_batch(f::Function, A::SparseMatrixCSC, maxCol::Int)::Nothing
-  @assert maxCol <= size(A, 2) "$maxCol, $(size(A, 2))"
+function for_non_zeros_batch(f::Function, A::SparseMatrixCSC, max_col::Int)::Nothing
+  @assert max_col <= size(A, 2) "$max_col, $(size(A, 2))"
 
   rows = rowvals(A)
   vals = nonzeros(A)
-  for col in 1:maxCol
+  for col in 1:max_col
     range = nzrange(A, col)
     isempty(range) && continue
     f((@view vals[range]), (@view rows[range]), col)
@@ -58,19 +58,19 @@ end
   n::Int,
   sites::Vector{<:Index},
   tol::Real,
-  opCacheVec::OpCacheVec,
+  op_cache_vec::OpCacheVec,
 )::Tuple{Dict{QN,MPOGraph{C}},BlockSparseMatrix{ValType},Index} where {C}
-  hasQNs = hasqns(sites)
-  nextGraphs = Dict{QN,MPOGraph{C}}()
+  has_qns = hasqns(sites)
+  next_graphs = Dict{QN,MPOGraph{C}}()
 
   matrix = BlockSparseMatrix{ValType}()
 
   qi = Vector{Pair{QN,Int}}()
-  outgoingLinkOffset = 0
+  outgoing_link_offset = 0
 
-  mIdentityToID = Vector{Int}()
+  id_of_left_vertex_in_zero_flux_term = Vector{Int}()
 
-  for (inQN, g) in graphs
+  for (in_qn, g) in graphs
     W = sparse_edge_weights(g)
     Q, R, prow, pcol, rank = sparse_qr(W, tol)
@@ -84,28 +84,28 @@ end
       Q *= only(R)
     end
 
-    if hasQNs
-      append!(qi, [inQN => rank])
+    if has_qns
+      append!(qi, [in_qn => rank])
     end
 
     # Form the local transformation tensor.
     # At the 0th dummy site we don't need to do this.
     @timeit "constructing the sparse matrix" if n != 0
       ## TODO: This is wasteful
-      localMatrices = [
-        my_matrix([lv.opOnSite], opCacheVec[n]; needsJWString=lv.needsJWString) for
-        lv in g.leftVertexValues
+      local_matrices = [
+        my_matrix([lv.opOnSite], op_cache_vec[n]; needs_JW_string=lv.needs_JW_string) for
+        lv in g.left_vertex_values
       ]
 
       for_non_zeros(Q, left_size(g), rank) do weight, i, m
-        rightLink = m + outgoingLinkOffset
+        right_link = m + outgoing_link_offset
 
         lv = left_value(g, prow[i])
-        matrixElement = get!(matrix, (lv.link, rightLink)) do
+        matrix_element = get!(matrix, (lv.link, right_link)) do
           return zeros(C, dim(sites[n]), dim(sites[n]))
         end
 
-        matrixElement .+= weight * localMatrices[prow[i]]
+        matrix_element .+= weight * local_matrices[prow[i]]
       end
     end
@@ -115,72 +115,72 @@ end
     # Connect this output \tilde{A}_m to \tilde{B}_m = (R \vec{B})_m
     # If we are processing the last site then there's no need to do this.
@timeit "constructing the next graphs" if n != length(sites) - nextGraphOfZeroFlux = get!(nextGraphs, inQN) do + next_graph_zero_flux = get!(next_graphs, in_qn) do return MPOGraph{C}() end - resize!(mIdentityToID, rank) - mIdentityToID .= 0 + resize!(id_of_left_vertex_in_zero_flux_term, rank) + id_of_left_vertex_in_zero_flux_term .= 0 for_non_zeros_batch(R, right_size(g)) do weights, ms, j j = pcol[j] rv = right_value(g, j) if !isempty(rv.ops) && rv.ops[end].n == n + 1 onsite = pop!(rv.ops) - flux = opCacheVec[n + 1][onsite.id].qnFlux - newIsFermionic = xor(rv.isFermionic, opCacheVec[n + 1][onsite.id].isFermionic) + flux = op_cache_vec[n + 1][onsite.id].qnFlux + is_fermionic = xor(rv.is_fermionic, op_cache_vec[n + 1][onsite.id].is_fermionic) - rv = RightVertex(rv.ops, newIsFermionic) + rv = RightVertex(rv.ops, is_fermionic) - nextGraph = get!(nextGraphs, inQN + flux) do + next_graph = get!(next_graphs, in_qn + flux) do return MPOGraph{C}() end - add_edges!(nextGraph, rv, rank, ms, outgoingLinkOffset, onsite, weights) + add_edges!(next_graph, rv, rank, ms, outgoing_link_offset, onsite, weights) else - add_edges_id!( - nextGraphOfZeroFlux, + add_edges_vector_lookup!( + next_graph_zero_flux, rv, rank, ms, - outgoingLinkOffset, + outgoing_link_offset, OpID(1, n + 1), weights, - mIdentityToID, + id_of_left_vertex_in_zero_flux_term, ) end end - if num_edges(nextGraphOfZeroFlux) == 0 - delete!(nextGraphs, inQN) + if num_edges(next_graph_zero_flux) == 0 + delete!(next_graphs, in_qn) end - for (_, nextGraph) in nextGraphs - empty!(nextGraph.leftVertexIDs) + for (_, next_graph) in next_graphs + empty!(next_graph.left_vertex_ids) end end empty!(g) - outgoingLinkOffset += rank + outgoing_link_offset += rank end - for (_, nextGraph) in nextGraphs - empty!(nextGraph.rightVertexIDs) + for (_, next_graph) in next_graphs + empty!(next_graph.right_vertex_ids) end - if hasQNs - outgoingLink = Index(qi...; tags="Link,l=$n", dir=ITensors.Out) + if has_qns + outgoing_link = Index(qi...; tags="Link,l=$n", dir=ITensors.Out) else - outgoingLink = Index(outgoingLinkOffset; tags="Link,l=$n") + outgoing_link = Index(outgoing_link_offset; tags="Link,l=$n") end - return nextGraphs, matrix, outgoingLink + return next_graphs, matrix, outgoing_link end @timeit function svdMPO_new( ValType::Type{<:Number}, os::OpIDSum{C}, - opCacheVec::OpCacheVec, + op_cache_vec::OpCacheVec, sites::Vector{<:Index}; tol::Real=-1, )::MPO where {C} @@ -195,8 +195,8 @@ end llinks = Vector{Index}(undef, N + 1) for n in 0:N - graphs, symbolicMatrix, llinks[n + 1] = at_site!( - ValType, graphs, n, sites, tol, opCacheVec + graphs, block_sparse_matrix, llinks[n + 1] = at_site!( + ValType, graphs, n, sites, tol, op_cache_vec ) # For the 0th iteration we only care about constructing the graphs for the next site. 
@@ -213,8 +213,8 @@ end
       dim(dag(sites[n])),
     )
 
-    for ((leftLink, rightLink), localOpMatrix) in symbolicMatrix
-      tensor[leftLink, rightLink, :, :] = localOpMatrix
+    for ((left_link, right_link), block) in block_sparse_matrix
+      tensor[left_link, right_link, :, :] = block
     end
 
     H[n] = itensor(
@@ -246,51 +246,59 @@ function MPO_new(
   ValType::Type{<:Number},
   os::OpIDSum,
   sites::Vector{<:Index},
-  opCacheVec::OpCacheVec;
-  basisOpCacheVec=nothing,
+  op_cache_vec::OpCacheVec;
+  basis_op_cache_vec=nothing,
   tol::Real=-1,
 )::MPO
-  opCacheVec = to_OpCacheVec(sites, opCacheVec)
-  basisOpCacheVec = to_OpCacheVec(sites, basisOpCacheVec)
-  os, opCacheVec = prepare_opID_sum!(os, sites, opCacheVec, basisOpCacheVec)
-  return svdMPO_new(ValType, os, opCacheVec, sites; tol=tol)
+  op_cache_vec = to_OpCacheVec(sites, op_cache_vec)
+  basis_op_cache_vec = to_OpCacheVec(sites, basis_op_cache_vec)
+  os, op_cache_vec = prepare_opID_sum!(os, sites, op_cache_vec, basis_op_cache_vec)
+  return svdMPO_new(ValType, os, op_cache_vec, sites; tol=tol)
 end
 
 function MPO_new(
-  os::OpIDSum, sites::Vector{<:Index}, opCacheVec; basisOpCacheVec=nothing, tol::Real=-1
+  os::OpIDSum,
+  sites::Vector{<:Index},
+  op_cache_vec;
+  basis_op_cache_vec=nothing,
+  tol::Real=-1,
 )::MPO
-  opCacheVec = to_OpCacheVec(sites, opCacheVec)
-  ValType = determine_val_type(os, opCacheVec)
-  return MPO_new(ValType, os, sites, opCacheVec; basisOpCacheVec=basisOpCacheVec, tol=tol)
+  op_cache_vec = to_OpCacheVec(sites, op_cache_vec)
+  ValType = determine_val_type(os, op_cache_vec)
+  return MPO_new(
+    ValType, os, sites, op_cache_vec; basis_op_cache_vec=basis_op_cache_vec, tol=tol
+  )
 end
 
 function MPO_new(
   ValType::Type{<:Number},
   os::OpSum,
   sites::Vector{<:Index};
-  basisOpCacheVec=nothing,
+  basis_op_cache_vec=nothing,
   tol::Real=-1,
 )::MPO
-  opIDSum, opCacheVec = op_sum_to_opID_sum(os, sites)
+  opID_sum, op_cache_vec = op_sum_to_opID_sum(os, sites)
   return MPO_new(
-    ValType, opIDSum, sites, opCacheVec; basisOpCacheVec=basisOpCacheVec, tol=tol
+    ValType, opID_sum, sites, op_cache_vec; basis_op_cache_vec=basis_op_cache_vec, tol=tol
   )
 end
 
 function MPO_new(
-  os::OpSum, sites::Vector{<:Index}; basisOpCacheVec=nothing, tol::Real=-1
+  os::OpSum, sites::Vector{<:Index}; basis_op_cache_vec=nothing, tol::Real=-1
 )::MPO
-  opIDSum, opCacheVec = op_sum_to_opID_sum(os, sites)
-  return MPO_new(opIDSum, sites, opCacheVec; basisOpCacheVec=basisOpCacheVec, tol=tol)
+  opID_sum, op_cache_vec = op_sum_to_opID_sum(os, sites)
+  return MPO_new(
+    opID_sum, sites, op_cache_vec; basis_op_cache_vec=basis_op_cache_vec, tol=tol
+  )
 end
 
 function sparsity(mpo::MPO)::Float64
-  numEntries = 0
-  numZeros = 0
+  num_entries = 0
+  num_zeros = 0
   for tensor in mpo
-    numEntries += prod(size(tensor))
-    numZeros += prod(size(tensor)) - ITensors.nnz(tensor)
+    num_entries += prod(size(tensor))
+    num_zeros += prod(size(tensor)) - ITensors.nnz(tensor)
   end
 
-  return numZeros / numEntries
+  return num_zeros / num_entries
 end
diff --git a/src/OpIDSum.jl b/src/OpIDSum.jl
index 0f3c8a2..114b5f6 100644
--- a/src/OpIDSum.jl
+++ b/src/OpIDSum.jl
@@ -1,15 +1,15 @@
 struct OpInfo
   matrix::Matrix
-  isFermionic::Bool
+  is_fermionic::Bool
   qnFlux::QN
 end
 
-function OpInfo(localOp::Op, site::Index)
-  tensor = op(site, ITensors.which_op(localOp); ITensors.params(localOp)...)
-  isFermionic = has_fermion_string(ITensors.name(localOp), site)
+function OpInfo(local_op::Op, site::Index)
+  tensor = op(site, ITensors.which_op(local_op); ITensors.params(local_op)...)
+  is_fermionic = has_fermion_string(ITensors.name(local_op), site)
   qnFlux = flux(tensor)
 
-  return OpInfo(copy(array(tensor)), isFermionic, isnothing(qnFlux) ? QN() : qnFlux)
+  return OpInfo(copy(array(tensor)), is_fermionic, isnothing(qnFlux) ? QN() : qnFlux)
 end
 
 OpCacheVec = Vector{Vector{OpInfo}}
@@ -91,15 +91,15 @@ function add_to_scalar!(os::OpIDSum{C}, i::Integer, scalar::C)::Nothing where {C
   return nothing
 end
 
-function determine_val_type(os::OpIDSum{C}, opCacheVec::OpCacheVec) where {C}
+function determine_val_type(os::OpIDSum{C}, op_cache_vec::OpCacheVec) where {C}
   !all(isreal(scalar) for scalar in os.scalars) && return ComplexF64
-  !all(isreal(op.matrix) for opsOfSite in opCacheVec for op in opsOfSite) &&
+  !all(isreal(op.matrix) for ops_of_site in op_cache_vec for op in ops_of_site) &&
     return ComplexF64
   return Float64
 end
 
 @timeit function sort_each_term!(
-  os::OpIDSum, opCacheVec::OpCacheVec, sites::Vector{<:Index}
+  os::OpIDSum, op_cache_vec::OpCacheVec, sites::Vector{<:Index}
 )::Nothing
   isless_site(o1::OpID, o2::OpID) = o1.n < o2.n
@@ -122,7 +122,7 @@ end
         "The OpSum contains an operator acting on site $(op.n) that extends beyond the number of sites $(length(sites)).",
       )
 
-      if opCacheVec[op.n][op.id].isFermionic
+      if op_cache_vec[op.n][op.id].is_fermionic
         parity = -parity
       else
         # Ignore bosonic operators in perm
@@ -145,13 +145,11 @@ end
 end
 
 @timeit function merge_terms!(os::OpIDSum{C})::Nothing where {C}
-  uniqueTermLocations = Dict{
-    SubArray{OpID,1,Vector{OpID},Tuple{UnitRange{Int64}},true},Int
-  }()
+  unique_location = Dict{SubArray{OpID,1,Vector{OpID},Tuple{UnitRange{Int64}},true},Int}()
 
   for i in eachindex(os)
     scalar, ops = os[i]
-    loc = get!(uniqueTermLocations, ops, i)
+    loc = get!(unique_location, ops, i)
     loc == i && continue
 
     add_to_scalar!(os, loc, scalar)
@@ -161,22 +159,22 @@ end
   return nothing
 end
 
-function convert_to_basis(op::Matrix, basisCache::Vector{OpInfo})::Tuple{ComplexF64,Int}
+function convert_to_basis(op::Matrix, basis_cache::Vector{OpInfo})::Tuple{ComplexF64,Int}
   function check_ratios_are_equal(a1::Number, b1::Number, ai::Number, bi::Number)::Bool
     # TODO: Add a tolerance param here
     return abs(a1 * bi - b1 * ai) <= 1e-10
   end
 
-  for i in eachindex(basisCache)
-    basisOp = basisCache[i].matrix
+  for i in eachindex(basis_cache)
+    basis_op = basis_cache[i].matrix
 
-    j = findfirst(val -> val != 0, basisOp)
+    j = findfirst(val -> val != 0, basis_op)
     if all(
-      check_ratios_are_equal(basisOp[j], op[j], basisOp[k], op[k]) for
+      check_ratios_are_equal(basis_op[j], op[j], basis_op[k], op[k]) for
       k in CartesianIndices(op)
     )
-      return op[j] / basisOp[j], i
+      return op[j] / basis_op[j], i
     end
   end
@@ -200,28 +198,28 @@ function for_equal_sites(f::Function, ops::AbstractVector{OpID})::Nothing
 end
 
 @timeit function rewrite_in_operator_basis(
-  os::OpIDSum{C}, opCacheVec::OpCacheVec, basisOpCacheVec::OpCacheVec
+  os::OpIDSum{C}, op_cache_vec::OpCacheVec, basis_op_cache_vec::OpCacheVec
 )::OpIDSum{C} where {C}
   @memoize Dict function convert_to_basis_memoized(
     ops::AbstractVector{OpID}
   )::Tuple{ComplexF64,Int}
     n = ops[1].n
-    localOp = my_matrix(ops, opCacheVec[n])
-    return convert_to_basis(localOp, basisOpCacheVec[n])
+    local_op = my_matrix(ops, op_cache_vec[n])
+    return convert_to_basis(local_op, basis_op_cache_vec[n])
   end
 
-  newOpIdSum = OpIDSum{C}()
+  new_os = OpIDSum{C}()
 
-  opsInBasis = Vector{OpID}()
+  ops_in_basis = Vector{OpID}()
   for i in eachindex(os)
     scalar, ops = os[i]
-    empty!(opsInBasis)
+    empty!(ops_in_basis)
 
     for_equal_sites(ops) do a, b
-      coeff, basisID = convert_to_basis_memoized(@view ops[a:b])
+      coeff, basis_id = convert_to_basis_memoized(@view ops[a:b])
 
-      if basisID == 0
+      if basis_id == 0
         error(
           "The following operator product cannot be simplified into a single basis operator.\n" *
           "\tOperator product = $(ops[a:b])\n",
@@ -229,13 +227,13 @@ end
       end
 
       scalar *= coeff
-      push!(opsInBasis, OpID(basisID, ops[a].n))
+      push!(ops_in_basis, OpID(basis_id, ops[a].n))
     end
 
-    push!(newOpIdSum, C(scalar), opsInBasis)
+    push!(new_os, C(scalar), ops_in_basis)
   end
 
-  return newOpIdSum
+  return new_os
 end
 
 @timeit function op_sum_to_opID_sum(
@@ -243,49 +241,50 @@ end
 )::Tuple{OpIDSum{C},OpCacheVec} where {C}
   N = length(sites)
 
-  opsOnEachSite = [Dict{Op,Int}(Op("I", n) => 1) for n in 1:N]
+  ops_on_each_site = [Dict{Op,Int}(Op("I", n) => 1) for n in 1:N]
 
   opID_sum = OpIDSum{C}()
 
-  opIDTerm = Vector{OpID}()
+  opID_term = Vector{OpID}()
+  ## TODO: Don't need $i$ here
   for (i, term) in enumerate(os)
-    resize!(opIDTerm, 0)
+    resize!(opID_term, 0)
     for op in ITensors.terms(term)
       op.which_op == "I" && continue
 
       n = ITensors.site(op)
-      opID = get!(opsOnEachSite[n], op, length(opsOnEachSite[n]) + 1)
-      push!(opIDTerm, OpID(opID, n))
+      opID = get!(ops_on_each_site[n], op, length(ops_on_each_site[n]) + 1)
+      push!(opID_term, OpID(opID, n))
     end
 
-    push!(opID_sum, ITensors.coefficient(term), opIDTerm)
+    push!(opID_sum, ITensors.coefficient(term), opID_term)
   end
 
-  opCacheVec = OpCacheVec()
-  for (n, opsOnSite) in enumerate(opsOnEachSite)
-    opCache = Vector{OpInfo}(undef, length(opsOnSite))
+  op_cache_vec = OpCacheVec()
+  for (n, ops_on_site) in enumerate(ops_on_each_site)
+    op_cache = Vector{OpInfo}(undef, length(ops_on_site))
 
-    for (op, id) in opsOnSite
+    for (op, id) in ops_on_site
       @assert ITensors.site(op) == n
-      opCache[id] = OpInfo(op, sites[n])
+      op_cache[id] = OpInfo(op, sites[n])
     end
 
-    push!(opCacheVec, opCache)
+    push!(op_cache_vec, op_cache)
   end
 
-  return opID_sum, opCacheVec
+  return opID_sum, op_cache_vec
 end
 
-@timeit function check_for_errors(os::OpIDSum, opCacheVec::OpCacheVec)::Nothing
+@timeit function check_for_errors(os::OpIDSum, op_cache_vec::OpCacheVec)::Nothing
   for i in eachindex(os)
     _, ops = os[i]
 
     flux = QN()
-    fermionParity = 0
+    fermion_parity = 0
     for j in eachindex(ops)
       opj = ops[j]
-      flux += opCacheVec[opj.n][opj.id].qnFlux
-      fermionParity += opCacheVec[opj.n][opj.id].isFermionic
+      flux += op_cache_vec[opj.n][opj.id].qnFlux
+      fermion_parity += op_cache_vec[opj.n][opj.id].is_fermionic
 
       if j < length(ops)
         ops[j].n > ops[j + 1].n && error("The operators are not sorted by site.")
@@ -295,59 +294,59 @@ end
     end
 
     flux != QN() && error("The term does not have zero flux.")
-    mod(fermionParity, 2) != 0 && error("Odd parity fermion terms not supported.")
+    mod(fermion_parity, 2) != 0 && error("Odd parity fermion terms not supported.")
   end
 end
 
 @timeit function prepare_opID_sum!(
   os::OpIDSum,
   sites::Vector{<:Index},
-  opCacheVec::OpCacheVec,
-  basisOpCacheVec::Union{Nothing,OpCacheVec},
+  op_cache_vec::OpCacheVec,
+  basis_op_cache_vec::Union{Nothing,OpCacheVec},
 )::Tuple{OpIDSum,OpCacheVec}
-  sort_each_term!(os, opCacheVec, sites)
+  sort_each_term!(os, op_cache_vec, sites)
   merge_terms!(os)
 
-  if !isnothing(basisOpCacheVec)
-    os, opCacheVec = rewrite_in_operator_basis(os, opCacheVec, basisOpCacheVec),
-    basisOpCacheVec
+  if !isnothing(basis_op_cache_vec)
+    os, op_cache_vec = rewrite_in_operator_basis(os, op_cache_vec, basis_op_cache_vec),
+    basis_op_cache_vec
     merge_terms!(os)
   end
 
-  check_for_errors(os, opCacheVec)
+  check_for_errors(os, op_cache_vec)
 
   # This is to account for the dummy site at the end
-  push!(opCacheVec, OpInfo[])
+  push!(op_cache_vec, OpInfo[])
 
-  return os, opCacheVec
+  return os, op_cache_vec
 end
 
 function my_matrix(
-  term::AbstractVector{OpID}, opCache::Vector{OpInfo}; needsJWString::Bool=false
+  term::AbstractVector{OpID}, op_cache::Vector{OpInfo}; needs_JW_string::Bool=false
 )
   @assert all(op.n == term[1].n for op in term)
 
   if isempty(term)
-    localMatrix = Matrix{Float64}(I, size(opCache[begin].matrix)...)
+    local_matrix = Matrix{Float64}(I, size(op_cache[begin].matrix)...)
   else
-    localMatrix = prod(opCache[op.id].matrix for op in term)
+    local_matrix = prod(op_cache[op.id].matrix for op in term)
   end
 
-  if needsJWString
-    # localMatrix can be returned directly from the opCache, and in this case we need to copy it
+  if needs_JW_string
+    # local_matrix can be returned directly from the op_cache, and in this case we need to copy it
     # before modification.
-    localMatrix = copy(localMatrix)
+    local_matrix = copy(local_matrix)
 
    # This is a weird way of applying the Jordan-Wigner string, but op("F", sites[n]) is very slow.
-    if size(localMatrix, 1) == 2
-      localMatrix[:, 2] *= -1
-    elseif size(localMatrix, 1) == 4
-      localMatrix[:, 2] *= -1
-      localMatrix[:, 3] *= -1
+    if size(local_matrix, 1) == 2
+      local_matrix[:, 2] *= -1
+    elseif size(local_matrix, 1) == 4
+      local_matrix[:, 2] *= -1
+      local_matrix[:, 3] *= -1
     else
       error("Unknown fermionic site.")
     end
   end
 
-  return localMatrix
+  return local_matrix
 end
diff --git a/src/graph.jl b/src/graph.jl
index 24c271a..ddac1b0 100644
--- a/src/graph.jl
+++ b/src/graph.jl
@@ -1,7 +1,7 @@
 struct LeftVertex
   link::Int32
   opOnSite::OpID
-  needsJWString::Bool
+  needs_JW_string::Bool
 end
 
 function Base.hash(v::LeftVertex, h::UInt)
@@ -14,7 +14,7 @@ end
 
 struct RightVertex
   ops::Vector{OpID}
-  isFermionic::Bool
+  is_fermionic::Bool
 end
 
 function Base.hash(v::RightVertex, h::UInt)
@@ -26,13 +26,13 @@ end
 end
 
 struct MPOGraph{C}
-  edgeLeftVertex::Vector{Int}
-  edgeRightVertex::Vector{Int}
-  edgeWeight::Vector{C}
-  leftVertexIDs::Dict{LeftVertex,Int}
-  rightVertexIDs::Dict{RightVertex,Int}
-  leftVertexValues::Vector{LeftVertex}
-  rightVertexValues::Vector{RightVertex}
+  edge_left_vertex::Vector{Int}
+  edge_right_vertex::Vector{Int}
+  edge_weight::Vector{C}
+  left_vertex_ids::Dict{LeftVertex,Int}
+  right_vertex_ids::Dict{RightVertex,Int}
+  left_vertex_values::Vector{LeftVertex}
+  right_vertex_values::Vector{RightVertex}
 end
 
 function MPOGraph{C}() where {C}
@@ -56,7 +56,7 @@ Every term in the sum must be unique and sorted by site, have 0 flux, and even f
   g = MPOGraph{C}()
 
   lv = LeftVertex(0, OpID(0, 0), false)
-  push!(g.leftVertexValues, lv)
+  push!(g.left_vertex_values, lv)
 
   for i in eachindex(os)
     scalar, ops = os[i]
@@ -64,44 +64,44 @@ Every term in the sum must be unique and sorted by site, have 0 flux, and even f
     scalar == 0 && continue
 
     rv = RightVertex(reverse(ops), false)
-    push!(g.rightVertexValues, rv)
+    push!(g.right_vertex_values, rv)
 
-    push!(g.edgeLeftVertex, 1)
-    push!(g.edgeRightVertex, length(g.rightVertexValues))
-    push!(g.edgeWeight, scalar)
+    push!(g.edge_left_vertex, 1)
+    push!(g.edge_right_vertex, length(g.right_vertex_values))
+    push!(g.edge_weight, scalar)
   end
 
   return g
 end
 
 function Base.empty!(g::MPOGraph)::Nothing
-  empty!(g.edgeLeftVertex)
-  empty!(g.edgeRightVertex)
-  empty!(g.edgeWeight)
-  empty!(g.leftVertexValues)
-  empty!(g.rightVertexValues)
+  empty!(g.edge_left_vertex)
+  empty!(g.edge_right_vertex)
+  empty!(g.edge_weight)
+  empty!(g.left_vertex_values)
+  empty!(g.right_vertex_values)
 
   return nothing
 end
 
 function left_size(g::MPOGraph)::Int
-  return length(g.leftVertexValues)
+  return length(g.left_vertex_values)
 end
 
 function right_size(g::MPOGraph)::Int
-  return length(g.rightVertexValues)
+  return length(g.right_vertex_values)
 end
 
 function num_edges(g::MPOGraph)::Int
-  return length(g.edgeLeftVertex)
+  return length(g.edge_left_vertex)
 end
 
-function left_value(g::MPOGraph{C}, leftID::Int)::LeftVertex where {C}
-  return g.leftVertexValues[leftID]
+function left_value(g::MPOGraph{C}, left_id::Int)::LeftVertex where {C}
+  return g.left_vertex_values[left_id]
 end
 
-function right_value(g::MPOGraph{C}, rightID::Int)::RightVertex where {C}
-  return g.rightVertexValues[rightID]
+function right_value(g::MPOGraph{C}, right_id::Int)::RightVertex where {C}
+  return g.right_vertex_values[right_id]
 end
 
 function add_edges!(
@@ -109,68 +109,68 @@ function add_edges!(
   rv::RightVertex,
   rank::Int,
   ms::AbstractVector{Int},
-  mOffset::Int,
-  onsiteOp::OpID,
+  m_offset::Int,
+  onsite_op::OpID,
   weights::AbstractVector{C},
 )::Nothing where {C}
-  rightID = get!(g.rightVertexIDs, rv) do
-    push!(g.rightVertexValues, rv)
-    return length(g.rightVertexValues)
+  right_id = get!(g.right_vertex_ids, rv) do
+    push!(g.right_vertex_values, rv)
+    return length(g.right_vertex_values)
   end
 
   for i in 1:length(ms)
     ms[i] > rank && return nothing
 
-    lv = LeftVertex(ms[i] + mOffset, onsiteOp, rv.isFermionic)
+    lv = LeftVertex(ms[i] + m_offset, onsite_op, rv.is_fermionic)
 
-    leftID = get!(g.leftVertexIDs, lv) do
-      push!(g.leftVertexValues, lv)
-      return length(g.leftVertexValues)
+    left_id = get!(g.left_vertex_ids, lv) do
+      push!(g.left_vertex_values, lv)
+      return length(g.left_vertex_values)
     end
 
-    push!(g.edgeLeftVertex, leftID)
-    push!(g.edgeRightVertex, rightID)
-    push!(g.edgeWeight, weights[i])
+    push!(g.edge_left_vertex, left_id)
+    push!(g.edge_right_vertex, right_id)
+    push!(g.edge_weight, weights[i])
   end
 
   return nothing
 end
 
-function add_edges_id!(
+function add_edges_vector_lookup!(
   g::MPOGraph{C},
   rv::RightVertex,
   rank::Int,
   ms::AbstractVector{Int},
-  mOffset::Int,
-  onsiteOp::OpID,
+  m_offset::Int,
+  onsite_op::OpID,
   weights::AbstractVector{C},
-  fooBar::Vector{Int},
+  id_of_left_vertex::Vector{Int},
 )::Nothing where {C}
-  rightID = get!(g.rightVertexIDs, rv) do
-    push!(g.rightVertexValues, rv)
-    return length(g.rightVertexValues)
+  right_id = get!(g.right_vertex_ids, rv) do
+    push!(g.right_vertex_values, rv)
+    return length(g.right_vertex_values)
  end
 
   for i in 1:length(ms)
     m = ms[i]
     m > rank && return nothing
 
-    if fooBar[m] == 0
-      push!(g.leftVertexValues, LeftVertex(m + mOffset, onsiteOp, rv.isFermionic))
-      fooBar[m] = length(g.leftVertexValues)
+    if id_of_left_vertex[m] == 0
+      push!(g.left_vertex_values, LeftVertex(m + m_offset, onsite_op, rv.is_fermionic))
+      id_of_left_vertex[m] = length(g.left_vertex_values)
     end
 
-    push!(g.edgeLeftVertex, fooBar[m])
-    push!(g.edgeRightVertex, rightID)
-    push!(g.edgeWeight, weights[i])
+    push!(g.edge_left_vertex, id_of_left_vertex[m])
+    push!(g.edge_right_vertex, right_id)
+    push!(g.edge_weight, weights[i])
   end
 
   return nothing
 end
 
 @timeit function sparse_edge_weights(g::MPOGraph{C})::SparseMatrixCSC{C,Int} where {C}
-  @assert length(g.edgeLeftVertex) == length(g.edgeRightVertex)
-  @assert length(g.edgeLeftVertex) == length(g.edgeWeight)
+  @assert length(g.edge_left_vertex) == length(g.edge_right_vertex)
+  @assert length(g.edge_left_vertex) == length(g.edge_weight)
 
-  return sparse(g.edgeLeftVertex, g.edgeRightVertex, g.edgeWeight)
+  return sparse(g.edge_left_vertex, g.edge_right_vertex, g.edge_weight)
 end
diff --git a/test/runtests.jl b/test/runtests.jl
index 9090bc5..13b277c 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -11,9 +11,12 @@ function compare_MPOs(A::MPO, B::MPO; tol::Real=1e-7)::Nothing
 end
 
 function test_from_OpSum(
-  os::OpSum, sites::Vector{<:Index}, basisOpCacheVec::Union{Nothing,OpCacheVec}, tol::Real
+  os::OpSum,
+  sites::Vector{<:Index},
+  basis_op_cache_vec::Union{Nothing,OpCacheVec},
+  tol::Real,
 )::Tuple{MPO,MPO}
-  mpo = MPO_new(os, sites; tol=tol, basisOpCacheVec=basisOpCacheVec)
+  mpo = MPO_new(os, sites; tol=tol, basis_op_cache_vec=basis_op_cache_vec)
 
   if tol < 0
     mpoFromITensor = MPO(os, sites)
@@ -178,12 +181,12 @@ function test_Fermi_Hubbard(N::Int, tol::Real)::Nothing
     "Nup * Ndn",
   ]
 
-  opCacheVec = [
+  op_cache_vec = [
     [OpInfo(ITensors.Op(name, n), sites[n]) for name in operatorNames] for
     n in eachindex(sites)
   ]
 
-  test_from_OpSum(os, sites, opCacheVec, tol)
+  test_from_OpSum(os, sites, op_cache_vec, tol)
 
   return nothing
 end
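
For reference, downstream code now passes `basis_op_cache_vec` where it previously passed `basisOpCacheVec`. The sketch below shows the updated call pattern; it is a minimal, illustrative example only — the module name `ITensorMPOConstruction`, the toy hopping Hamiltonian, and the operator-name list are assumptions, not part of this change.

```julia
using ITensors
using ITensorMPOConstruction  # module name assumed

# Minimal sketch of the renamed keyword `basis_op_cache_vec` (was `basisOpCacheVec`).
# The Hamiltonian and per-site operator basis here are illustrative only.
function example_mpo(N::Int)::MPO
  sites = siteinds("Electron", N; conserve_qns=true)

  # Toy spin-up hopping chain.
  os = OpSum()
  for k in 1:(N - 1)
    os += -1.0, "Cdagup", k, "Cup", k + 1
    os += -1.0, "Cdagup", k + 1, "Cup", k
  end

  # Per-site operator basis, following the pattern used in the examples above.
  operatorNames = ["I", "Cup", "Cdagup", "Cdn", "Cdagdn"]
  op_cache_vec = [
    [OpInfo(ITensors.Op(name, n), sites[n]) for name in operatorNames] for
    n in eachindex(sites)
  ]

  return MPO_new(os, sites; basis_op_cache_vec=op_cache_vec)
end

H = example_mpo(10)

# Per the README note on `tol` vs `cutoff`: to compress, build with the default
# `tol` first and then truncate separately.
ITensors.truncate!(H; cutoff=1e-12)
```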