diff --git a/Project.toml b/Project.toml index e4caee82..bfa7029a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman and contributors"] -version = "0.4.1" +version = "0.4.2" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -51,7 +51,7 @@ DocStringExtensions = "0.8, 0.9" EinExprs = "0.6.4" Graphs = "1.8" GraphsFlows = "0.1.1" -ITensors = "0.3.23" +ITensors = "0.3.58" IsApprox = "0.1" IterTools = "1.4.0" KrylovKit = "0.6.0" diff --git a/README.md b/README.md index 6aec5bbb..d50f4377 100644 --- a/README.md +++ b/README.md @@ -10,10 +10,14 @@ > In short, use this package with caution, and don't expect the interface to be stable > or for us to clearly announce parts of the code we are changing. + + # ITensorNetworks A package to provide general network data structures and tools to use with ITensors.jl. + + ## Installation You can install this package through the Julia package manager: @@ -99,13 +103,13 @@ and 4 edge(s): with vertex data: 4-element Dictionary{Tuple{Int64, Int64}, Any} - (1, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=723|"1×1,1×2")) - (2, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=823|"2×1,2×2")) - (1, 2) │ ((dim=2|id=723|"1×1,1×2"), (dim=2|id=712|"1×2,2×2")) - (2, 2) │ ((dim=2|id=823|"2×1,2×2"), (dim=2|id=712|"1×2,2×2")) + (1, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=598|"1×1,1×2")) + (2, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=457|"2×1,2×2")) + (1, 2) │ ((dim=2|id=598|"1×1,1×2"), (dim=2|id=683|"1×2,2×2")) + (2, 2) │ ((dim=2|id=457|"2×1,2×2"), (dim=2|id=683|"1×2,2×2")) julia> tn[1, 1] -ITensor ord=2 (dim=2|id=74|"1×1,2×1") (dim=2|id=723|"1×1,1×2") +ITensor ord=2 (dim=2|id=712|"1×1,2×1") (dim=2|id=598|"1×1,1×2") NDTensors.EmptyStorage{NDTensors.EmptyNumber, NDTensors.Dense{NDTensors.EmptyNumber, Vector{NDTensors.EmptyNumber}}} julia> neighbors(tn, (1, 1)) @@ -129,8 +133,8 @@ and 1 edge(s): with vertex data: 2-element Dictionary{Tuple{Int64, Int64}, Any} - (1, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=723|"1×1,1×2")) - (1, 2) │ ((dim=2|id=723|"1×1,1×2"), (dim=2|id=712|"1×2,2×2")) + (1, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=598|"1×1,1×2")) + (1, 2) │ ((dim=2|id=598|"1×1,1×2"), (dim=2|id=683|"1×2,2×2")) julia> tn_2 = subgraph(v -> v[1] == 2, tn) ITensorNetwork{Tuple{Int64, Int64}} with 2 vertices: @@ -143,8 +147,8 @@ and 1 edge(s): with vertex data: 2-element Dictionary{Tuple{Int64, Int64}, Any} - (2, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=823|"2×1,2×2")) - (2, 2) │ ((dim=2|id=823|"2×1,2×2"), (dim=2|id=712|"1×2,2×2")) + (2, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=457|"2×1,2×2")) + (2, 2) │ ((dim=2|id=457|"2×1,2×2"), (dim=2|id=683|"1×2,2×2")) ``` @@ -166,9 +170,9 @@ and 2 edge(s): with vertex data: 3-element Dictionary{Int64, Vector{Index}} - 1 │ Index[(dim=2|id=598|"S=1/2,Site,n=1")] - 2 │ Index[(dim=2|id=457|"S=1/2,Site,n=2")] - 3 │ Index[(dim=2|id=683|"S=1/2,Site,n=3")] + 1 │ Index[(dim=2|id=830|"S=1/2,Site,n=1")] + 2 │ Index[(dim=2|id=369|"S=1/2,Site,n=2")] + 3 │ Index[(dim=2|id=558|"S=1/2,Site,n=3")] and edge data: 0-element Dictionary{NamedEdge{Int64}, Vector{Index}} @@ -186,9 +190,9 @@ and 2 edge(s): with vertex data: 3-element Dictionary{Int64, Any} - 1 │ ((dim=2|id=598|"S=1/2,Site,n=1"), (dim=2|id=123|"1,2")) - 2 │ ((dim=2|id=457|"S=1/2,Site,n=2"), (dim=2|id=123|"1,2"), (dim=2|id=656|"2,3… - 3 │ ((dim=2|id=683|"S=1/2,Site,n=3"), (dim=2|id=656|"2,3")) + 1 │ ((dim=2|id=830|"S=1/2,Site,n=1"), (dim=2|id=186|"1,2")) + 2 │ 
((dim=2|id=369|"S=1/2,Site,n=2"), (dim=2|id=186|"1,2"), (dim=2|id=430|"2,3…
+  3 │ ((dim=2|id=558|"S=1/2,Site,n=3"), (dim=2|id=430|"2,3"))

 julia> tn2 = ITensorNetwork(s; link_space=2)
 ITensorNetwork{Int64} with 3 vertices:
 3-element Vector{Int64}:
  1
  2
  3
 and 2 edge(s):
 1 => 2
 2 => 3
 with vertex data:
 3-element Dictionary{Int64, Any}
- 1 │ ((dim=2|id=598|"S=1/2,Site,n=1"), (dim=2|id=382|"1,2"))
- 2 │ ((dim=2|id=457|"S=1/2,Site,n=2"), (dim=2|id=382|"1,2"), (dim=2|id=190|"2,3…
- 3 │ ((dim=2|id=683|"S=1/2,Site,n=3"), (dim=2|id=190|"2,3"))
+ 1 │ ((dim=2|id=830|"S=1/2,Site,n=1"), (dim=2|id=994|"1,2"))
+ 2 │ ((dim=2|id=369|"S=1/2,Site,n=2"), (dim=2|id=994|"1,2"), (dim=2|id=978|"2,3…
+ 3 │ ((dim=2|id=558|"S=1/2,Site,n=3"), (dim=2|id=978|"2,3"))

 julia> @visualize tn1;
 [terminal graph visualization of tn1 omitted]
diff --git a/examples/README.jl b/examples/README.jl
index 755ca3ab..0157793e 100644
--- a/examples/README.jl
+++ b/examples/README.jl
@@ -1,3 +1,15 @@
+#' > [!WARNING]
+#' > This is pre-release software. There are no guarantees that functionality won't break
+#' > from version to version, though we will try our best to indicate breaking changes
+#' > following [semantic versioning](https://semver.org/) (semver) by bumping the minor
+#' > version of the package. We are biasing heavily towards "moving fast and breaking things"
+#' > during this stage of development, which will allow us to more quickly develop the package
+#' > and bring it to a point where we have enough features and are happy enough with the external
+#' > interface to officially release it for general public use.
+#' >
+#' > In short, use this package with caution, and don't expect the interface to be stable
+#' > or for us to clearly announce parts of the code we are changing.
+
 #' # ITensorNetworks
 #'
 #' A package to provide general network data structures and tools to use with ITensors.jl.
diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl
index 2110e9de..aec11022 100644
--- a/src/abstractitensornetwork.jl
+++ b/src/abstractitensornetwork.jl
@@ -733,6 +733,15 @@ end
 # Common index checking
 #

+function hascommoninds(
+  ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V}
+) where {V}
+  for v in vertices(A)
+    !hascommoninds(siteinds(A, v), siteinds(B, v)) && return false
+  end
+  return true
+end
+
 function hassameinds(
   ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V}
 ) where {V}
diff --git a/src/imports.jl b/src/imports.jl
index bb4cfade..999201d8 100644
--- a/src/imports.jl
+++ b/src/imports.jl
@@ -82,6 +82,8 @@ import ITensors:
   dag,
   # permute
   permute,
+  # commoninds
+  hascommoninds,
   # linkdims
   linkdim,
   linkdims,
diff --git a/src/solvers/alternating_update/alternating_update.jl b/src/solvers/alternating_update/alternating_update.jl
index 60e09ecc..7c423b0c 100644
--- a/src/solvers/alternating_update/alternating_update.jl
+++ b/src/solvers/alternating_update/alternating_update.jl
@@ -94,8 +94,6 @@ function alternating_update(
 end

 function alternating_update(operator::AbstractTTN, init_state::AbstractTTN; kwargs...)
-  check_hascommoninds(siteinds, operator, init_state)
-  check_hascommoninds(siteinds, operator, init_state')
   # Permute the indices to have a better memory layout
   # and minimize permutations
   operator = ITensors.permute(operator, (linkind, siteinds, linkind))
@@ -106,8 +104,6 @@ end
 function alternating_update(
   operator::AbstractTTN, init_state::AbstractTTN, sweep_plans; kwargs...
) - check_hascommoninds(siteinds, operator, init_state) - check_hascommoninds(siteinds, operator, init_state') # Permute the indices to have a better memory layout # and minimize permutations operator = ITensors.permute(operator, (linkind, siteinds, linkind)) @@ -138,10 +134,6 @@ Returns: function alternating_update( operators::Vector{<:AbstractTTN}, init_state::AbstractTTN; kwargs... ) - for operator in operators - check_hascommoninds(siteinds, operator, init_state) - check_hascommoninds(siteinds, operator, init_state') - end operators .= ITensors.permute.(operators, Ref((linkind, siteinds, linkind))) projected_operators = ProjTTNSum(operators) return alternating_update(projected_operators, init_state; kwargs...) @@ -150,10 +142,6 @@ end function alternating_update( operators::Vector{<:AbstractTTN}, init_state::AbstractTTN, sweep_plans; kwargs... ) - for operator in operators - check_hascommoninds(siteinds, operator, init_state) - check_hascommoninds(siteinds, operator, init_state') - end operators .= ITensors.permute.(operators, Ref((linkind, siteinds, linkind))) projected_operators = ProjTTNSum(operators) return alternating_update(projected_operators, init_state, sweep_plans; kwargs...) diff --git a/src/solvers/contract.jl b/src/solvers/contract.jl index 9dfc6b89..34fa78a7 100644 --- a/src/solvers/contract.jl +++ b/src/solvers/contract.jl @@ -27,8 +27,6 @@ function sum_contract( return typeof(tn2)([res]) end - # check_hascommoninds(siteinds, tn1, tn2) - # In case `tn1` and `tn2` have the same internal indices operator = ProjOuterProdTTN{vertextype(first(tn1s))}[] for (tn1, tn2) in zip(tn1s, tn2s) diff --git a/src/treetensornetworks/abstracttreetensornetwork.jl b/src/treetensornetworks/abstracttreetensornetwork.jl index 3bbcf32c..b126bc9b 100644 --- a/src/treetensornetworks/abstracttreetensornetwork.jl +++ b/src/treetensornetworks/abstracttreetensornetwork.jl @@ -242,7 +242,6 @@ function loginner( end ψ1dag = sim(dag(ψ1); sites=[]) traversal_order = reverse(post_order_dfs_vertices(ψ1, root_vertex)) - check_hascommoninds(siteinds, ψ1dag, ψ2) O = ψ1dag[root_vertex] * ψ2[root_vertex] @@ -370,8 +369,6 @@ function inner( y::AbstractTTN, A::AbstractTTN, x::AbstractTTN; root_vertex=default_root_vertex(x, A, y) ) traversal_order = reverse(post_order_dfs_vertices(x, root_vertex)) - check_hascommoninds(siteinds, A, x) - check_hascommoninds(siteinds, A, y) ydag = sim(dag(y); sites=[]) x = sim(x; sites=[]) O = ydag[root_vertex] * A[root_vertex] * x[root_vertex] @@ -397,15 +394,6 @@ function inner( ), ) end - check_hascommoninds(siteinds, A, x) - check_hascommoninds(siteinds, B, y) - for v in vertices(B) - !hascommoninds( - uniqueinds(siteinds(A, v), siteinds(x, v)), uniqueinds(siteinds(B, v), siteinds(y, v)) - ) && error( - "$(typeof(x)) Ax and $(typeof(y)) By must share site indices. On site $v, Ax has site indices $(uniqueinds(siteinds(A, v), (siteinds(x, v)))) while By has site indices $(uniqueinds(siteinds(B, v), siteinds(y, v))).", - ) - end ydag = sim(linkinds, dag(y)) Bdag = sim(linkinds, dag(B)) traversal_order = reverse(post_order_dfs_vertices(x, root_vertex)) diff --git a/src/treetensornetworks/deprecated_opsum_to_ttn.jl b/src/treetensornetworks/deprecated_opsum_to_ttn.jl deleted file mode 100644 index 30e59e54..00000000 --- a/src/treetensornetworks/deprecated_opsum_to_ttn.jl +++ /dev/null @@ -1,636 +0,0 @@ -### This version is deprecated, we are keeping it around for later reference. 
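The `check_hascommoninds` guards removed in the hunks above are covered by the new `hascommoninds(siteinds, A, B)` method added to `src/abstractitensornetwork.jl` earlier in this diff, which callers can invoke directly. A minimal sketch of doing so by hand; the tree constructor, `random_ttn` initializer, and one-term operator here are illustrative assumptions, not part of this diff:

```julia
using ITensors, ITensorNetworks
using NamedGraphs: named_comb_tree

# Assumed setup: a small spin-half tree, a random TTN state, and a one-term
# operator (the constructor names here are assumptions for illustration).
s = siteinds("S=1/2", named_comb_tree((2, 2)))
psi0 = random_ttn(s; link_space=2)
H = TTN(OpSum() + ("Sz", first(vertices(s))), s)

# The new method returns false as soon as some vertex of H and psi0
# shares no site index.
hascommoninds(siteinds, H, psi0) ||
  error("operator and initial state must share site indices")
```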
-# convert ITensors.OpSum to TreeTensorNetwork - -# -# Utility methods -# - -# linear ordering of vertices in tree graph relative to chosen root, chosen outward from root -function find_index_in_tree(site, g::AbstractGraph, root_vertex) - ordering = reverse(post_order_dfs_vertices(g, root_vertex)) - return findfirst(x -> x == site, ordering) -end -function find_index_in_tree(o::Op, g::AbstractGraph, root_vertex) - return find_index_in_tree(ITensors.site(o), g, root_vertex) -end - -# determine 'support' of product operator on tree graph -function span(t::Scaled{C,Prod{Op}}, g::AbstractGraph) where {C} - spn = eltype(g)[] - nterms = length(t) - for i in 1:nterms, j in i:nterms - path = vertex_path(g, ITensors.site(t[i]), ITensors.site(t[j])) - spn = union(spn, path) - end - return spn -end - -# determine whether an operator string crosses a given graph vertex -function crosses_vertex(t::Scaled{C,Prod{Op}}, g::AbstractGraph, v) where {C} - return v ∈ span(t, g) -end - -# -# Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo.jl -# - -""" - svdTTN(os::OpSum{C}, sites::IndsNetwork{V<:Index}, root_vertex::V, kwargs...) where {C,V} - -Construct a dense TreeTensorNetwork from a symbolic OpSum representation of a -Hamiltonian, compressing shared interaction channels. -""" -function svdTTN( - os::OpSum{C}, sites::IndsNetwork{VT,<:Index}, root_vertex::VT; kwargs... -)::TTN where {C,VT} - mindim::Int = get(kwargs, :mindim, 1) - maxdim::Int = get(kwargs, :maxdim, 10000) - cutoff::Float64 = get(kwargs, :cutoff, 1e-15) - - ValType = ITensors.determineValType(ITensors.terms(os)) - - # traverse tree outwards from root vertex - vs = reverse(post_order_dfs_vertices(sites, root_vertex)) # store vertices in fixed ordering relative to root - es = reverse(reverse.(post_order_dfs_edges(sites, root_vertex))) # store edges in fixed ordering relative to root - # some things to keep track of - ranks = Dict(v => degree(sites, v) for v in vs) # rank of every TTN tensor in network - Vs = Dict(e => Matrix{ValType}(undef, 1, 1) for e in es) # link isometries for SVD compression of TTN - inmaps = Dict(e => Dict{Vector{Op},Int}() for e in es) # map from term in Hamiltonian to incoming channel index for every edge - outmaps = Dict(e => Dict{Vector{Op},Int}() for e in es) # map from term in Hamiltonian to outgoing channel index for every edge - inbond_coefs = Dict(e => ITensors.MatElem{ValType}[] for e in es) # bond coefficients for incoming edge channels - site_coef_done = Prod{Op}[] # list of terms for which the coefficient has been added to a site factor - - # temporary symbolic representation of TTN Hamiltonian - tempTTN = Dict(v => ArrElem{Scaled{C,Prod{Op}},ranks[v]}[] for v in vs) - - # build compressed finite state machine representation - for v in vs - # for every vertex, find all edges that contain this vertex - edges = filter(e -> dst(e) == v || src(e) == v, es) - # use the corresponding ordering as index order for tensor elements at this site - dim_in = findfirst(e -> dst(e) == v, edges) - edge_in = (isnothing(dim_in) ? 
[] : edges[dim_in]) - dims_out = findall(e -> src(e) == v, edges) - edges_out = edges[dims_out] - - # sanity check, leaves only have single incoming or outgoing edge - @assert !isempty(dims_out) || !isnothing(dim_in) - (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf(sites, v) - - for term in os - # loop over OpSum and pick out terms that act on current vertex - crosses_vertex(term, sites, v) || continue - - # filter out factors that come in from the direction of the incoming edge - incoming = filter( - t -> edge_in ∈ edge_path(sites, ITensors.site(t), v), ITensors.terms(term) - ) - # also store all non-incoming factors in standard order, used for channel merging - not_incoming = filter( - t -> edge_in ∉ edge_path(sites, ITensors.site(t), v), ITensors.terms(term) - ) - # filter out factor that acts on current vertex - onsite = filter(t -> (ITensors.site(t) == v), ITensors.terms(term)) - # for every outgoing edge, filter out factors that go out along that edge - outgoing = Dict( - e => filter(t -> e ∈ edge_path(sites, v, ITensors.site(t)), ITensors.terms(term)) - for e in edges_out - ) - - # translate into tensor entry - T_inds = MVector{ranks[v]}(fill(-1, ranks[v])) - bond_row = -1 - bond_col = -1 - if !isempty(incoming) - bond_row = ITensors.posInLink!(inmaps[edge_in], incoming) - bond_col = ITensors.posInLink!(outmaps[edge_in], not_incoming) # get incoming channel - bond_coef = convert(ValType, ITensors.coefficient(term)) - push!(inbond_coefs[edge_in], ITensors.MatElem(bond_row, bond_col, bond_coef)) - T_inds[dim_in] = bond_col - end - for dout in dims_out - T_inds[dout] = ITensors.posInLink!(outmaps[edges[dout]], outgoing[edges[dout]]) # add outgoing channel - end - # if term starts at this site, add its coefficient as a site factor - site_coef = one(C) - if (isnothing(dim_in) || T_inds[dim_in] == -1) && - ITensors.argument(term) ∉ site_coef_done - site_coef = ITensors.coefficient(term) - push!(site_coef_done, ITensors.argument(term)) - end - # add onsite identity for interactions passing through vertex - if isempty(onsite) - if !ITensors.using_auto_fermion() && isfermionic(incoming, sites) - error("No verified fermion support for automatic TTN constructor!") - else - push!(onsite, Op("Id", v)) - end - end - # save indices and value of symbolic tensor entry - el = ArrElem(T_inds, site_coef * Prod(onsite)) - push!(tempTTN[v], el) - end - ITensors.remove_dups!(tempTTN[v]) - # manual truncation: isometry on incoming edge - if !isnothing(dim_in) && !isempty(inbond_coefs[edges[dim_in]]) - M = ITensors.toMatrix(inbond_coefs[edges[dim_in]]) - U, S, V = svd(M) - P = S .^ 2 - truncate!(P; maxdim=maxdim, cutoff=cutoff, mindim=mindim) - tdim = length(P) - nc = size(M, 2) - Vs[edges[dim_in]] = Matrix{ValType}(V[1:nc, 1:tdim]) - end - end - - # compress this tempTTN representation into dense form - - link_space = dictionary([ - e => Index((isempty(outmaps[e]) ? 0 : size(Vs[e], 2)) + 2, edge_tag(e)) for e in es - ]) - - H = TTN(sites) - - for v in vs - - # redo the whole thing like before - edges = filter(e -> dst(e) == v || src(e) == v, es) - dim_in = findfirst(e -> dst(e) == v, edges) - dims_out = findall(e -> src(e) == v, edges) - - # slice isometries at this vertex - Vv = [Vs[e] for e in edges] - - linkinds = [link_space[e] for e in edges] - linkdims = dim.(linkinds) - - H[v] = ITensor() - - for el in tempTTN[v] - T_inds = el.idxs - t = el.val - (abs(coefficient(t)) > eps()) || continue - T = zeros(ValType, linkdims...) 
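-      # Dense assembly step: on each link, channel 1 is reserved for terms that
-      # start here and channel linkdims[d] for terms that end here; interior
-      # channels are filled below through the SVD isometries Vv.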
-      ct = convert(ValType, coefficient(t))
-      terminal_dims = findall(d -> T_inds[d] == -1, 1:ranks[v]) # directions in which term starts or ends
-      normal_dims = findall(d -> T_inds[d] ≠ -1, 1:ranks[v]) # normal dimensions, subject to SVD truncation
-      T_inds[terminal_dims] .= 1 # start in channel 1
-      for dout in filter(d -> d ∈ terminal_dims, dims_out)
-        T_inds[dout] = linkdims[dout] # end in channel linkdims[d] for each dimension d
-      end
-      if isempty(normal_dims)
-        T[T_inds...] += ct # on-site term
-      else
-        # handle channel compression isometries
-        dim_ranges = Tuple(size(Vv[d], 2) for d in normal_dims)
-        for c in CartesianIndices(dim_ranges)
-          z = ct
-          temp_inds = copy(T_inds)
-          for (i, d) in enumerate(normal_dims)
-            V_factor = Vv[d][T_inds[d], c[i]]
-            z *= (d == dim_in ? conj(V_factor) : V_factor) # conjugate incoming isometry factor
-            temp_inds[d] = 1 + c[i]
-          end
-          T[temp_inds...] += z
-        end
-      end
-      T = itensor(T, linkinds)
-      H[v] += T * computeSiteProd(sites, ITensors.argument(t))
-    end
-
-    # add starting and ending identity operators
-    idT = zeros(ValType, linkdims...)
-    if isnothing(dim_in)
-      idT[ones(Int, ranks[v])...] = 1.0 # only one real starting identity
-    end
-    # ending identities are a little more involved
-    if !isnothing(dim_in)
-      idT[linkdims...] = 1.0 # place identity if all channels end
-      # place identity from start of incoming channel to start of each single outgoing channel, and end all other channels
-      idT_end_inds = [linkdims...]
-      idT_end_inds[dim_in] = 1.0
-      for dout in dims_out
-        idT_end_inds[dout] = 1.0
-        idT[idT_end_inds...] = 1.0
-        idT_end_inds[dout] = linkdims[dout] # reset
-      end
-    end
-    T = itensor(idT, linkinds)
-    H[v] += T * ITensorNetworks.computeSiteProd(sites, Prod([Op("Id", v)]))
-  end
-
-  return H
-end
-
-#
-# Tree adaptations of functionalities in ITensors.jl/src/physics/autompo/opsum_to_mpo_generic.jl
-#
-
-# TODO: fix quantum number and fermion support, definitely broken
-
-# needed an extra `only` compared to ITensors version since IndsNetwork has Vector{<:Index}
-# as vertex data
-function isfermionic(t::Vector{Op}, sites::IndsNetwork{V,<:Index}) where {V}
-  p = +1
-  for op in t
-    if has_fermion_string(ITensors.name(op), only(sites[ITensors.site(op)]))
-      p *= -1
-    end
-  end
-  return (p == -1)
-end
-
-# only(site(ops[1])) in ITensors breaks for Tuple site labels, had to drop the only
-function computeSiteProd(sites::IndsNetwork{V,<:Index}, ops::Prod{Op})::ITensor where {V}
-  v = ITensors.site(ops[1])
-  T = op(sites[v], ITensors.which_op(ops[1]); ITensors.params(ops[1])...)
-  for j in 2:length(ops)
-    (ITensors.site(ops[j]) != v) && error("Mismatch of vertex labels in computeSiteProd")
-    opj = op(sites[v], ITensors.which_op(ops[j]); ITensors.params(ops[j])...)
-    T = product(T, opj)
-  end
-  return T
-end
-
-# changed `isless_site` to use tree vertex ordering relative to root
-function sorteachterm(os::OpSum, sites::IndsNetwork{V,<:Index}, root_vertex::V) where {V}
-  os = copy(os)
-  findpos(op::Op) = find_index_in_tree(op, sites, root_vertex)
-  isless_site(o1::Op, o2::Op) = findpos(o1) < findpos(o2)
-  N = nv(sites)
-  for n in eachindex(os)
-    t = os[n]
-    Nt = length(t)
-
-    if !all(map(v -> has_vertex(sites, v), ITensors.sites(t)))
-      error(
-        "The OpSum contains a term $t that does not have support on the underlying graph."
- ) - end - - prevsite = N + 1 #keep track of whether we are switching - #to a new site to make sure F string - #is only placed at most once for each site - - # Sort operators in t by site order, - # and keep the permutation used, perm, for analysis below - perm = Vector{Int}(undef, Nt) - sortperm!(perm, ITensors.terms(t); alg=InsertionSort, lt=isless_site) - - t = coefficient(t) * Prod(ITensors.terms(t)[perm]) - - # Identify fermionic operators, - # zeroing perm for bosonic operators, - # and inserting string "F" operators - parity = +1 - for n in Nt:-1:1 - currsite = ITensors.site(t[n]) - fermionic = has_fermion_string( - ITensors.which_op(t[n]), only(sites[ITensors.site(t[n])]) - ) - if !ITensors.using_auto_fermion() && (parity == -1) && (currsite < prevsite) - error("No verified fermion support for automatic TTN constructor!") # no verified support, just throw error - # Put local piece of Jordan-Wigner string emanating - # from fermionic operators to the right - # (Remaining F operators will be put in by svdMPO) - terms(t)[n] = Op("$(ITensors.which_op(t[n])) * F", only(ITensors.site(t[n]))) - end - prevsite = currsite - - if fermionic - error("No verified fermion support for automatic TTN constructor!") # no verified support, just throw error - parity = -parity - else - # Ignore bosonic operators in perm - # by zeroing corresponding entries - perm[n] = 0 - end - end - if parity == -1 - error("Parity-odd fermionic terms not yet supported by AutoTTN") - end - - # Keep only fermionic op positions (non-zero entries) - filter!(!iszero, perm) - # and account for anti-commuting, fermionic operators - # during above sort; put resulting sign into coef - t *= ITensors.parity_sign(perm) - ITensors.terms(os)[n] = t - end - return os -end - -""" - TTN(os::OpSum, sites::IndsNetwork{<:Index}; kwargs...) - TTN(eltype::Type{<:Number}, os::OpSum, sites::IndsNetwork{<:Index}; kwargs...) - -Convert an OpSum object `os` to a TreeTensorNetwork, with indices given by `sites`. -""" -function TTN( - os::OpSum, - sites::IndsNetwork{V,<:Index}; - root_vertex::V=default_root_vertex(sites), - splitblocks=false, - kwargs..., -)::TTN where {V} - length(ITensors.terms(os)) == 0 && error("OpSum has no terms") - is_tree(sites) || error("Site index graph must be a tree.") - is_leaf(sites, root_vertex) || error("Tree root must be a leaf vertex.") - - os = deepcopy(os) - os = sorteachterm(os, sites, root_vertex) - os = ITensors.sortmergeterms(os) # not exported - - if hasqns(first(first(vertex_data(sites)))) - if !is_path_graph(sites) - error( - "OpSum → TTN constructor for QN conserving tensor networks only works for path/linear graphs right now.", - ) - end - # Use `ITensors.MPO` for now until general TTN constructor - # works for QNs. - # TODO: Check it is a path graph and get a linear arrangement! - sites_linear_vertices = [only(sites[v]) for v in vertices(sites)] - vertices_to_linear_vertices = Dictionary(vertices(sites), eachindex(vertices(sites))) - os_linear_vertices = replace_vertices(os, vertices_to_linear_vertices) - mpo = MPO(os_linear_vertices, sites_linear_vertices) - tn = TTN(Dictionary(vertices(sites), [mpo[v] for v in 1:nv(sites)])) - return tn - end - T = svdTTN(os, sites, root_vertex; kwargs...) - if splitblocks - error("splitblocks not yet implemented for AbstractTreeTensorNetwork.") - T = ITensors.splitblocks(linkinds, T) # TODO: make this work - end - return T -end - -function mpo(os::OpSum, external_inds::Vector; kwargs...) - return TTN(os, path_indsnetwork(external_inds); kwargs...) 
-end - -# Conversion from other formats -function TTN(o::Op, s::IndsNetwork; kwargs...) - return TTN(OpSum{Float64}() + o, s; kwargs...) -end - -function TTN(o::Scaled{C,Op}, s::IndsNetwork; kwargs...) where {C} - return TTN(OpSum{C}() + o, s; kwargs...) -end - -function TTN(o::Sum{Op}, s::IndsNetwork; kwargs...) - return TTN(OpSum{Float64}() + o, s; kwargs...) -end - -function TTN(o::Prod{Op}, s::IndsNetwork; kwargs...) - return TTN(OpSum{Float64}() + o, s; kwargs...) -end - -function TTN(o::Scaled{C,Prod{Op}}, s::IndsNetwork; kwargs...) where {C} - return TTN(OpSum{C}() + o, s; kwargs...) -end - -function TTN(o::Sum{Scaled{C,Op}}, s::IndsNetwork; kwargs...) where {C} - return TTN(OpSum{C}() + o, s; kwargs...) -end - -# Catch-all for leaf eltype specification -function TTN(eltype::Type{<:Number}, os, sites::IndsNetwork; kwargs...) - return NDTensors.convert_scalartype(eltype, TTN(os, sites; kwargs...)) -end - -# -# Tree adaptation of functionalities in ITensors.jl/src/physics/autompo/matelem.jl -# - -################################# -# ArrElem (simple sparse array) # -################################# - -struct ArrElem{T,N} - idxs::MVector{N,Int} - val::T -end - -function Base.:(==)(a1::ArrElem{T,N}, a2::ArrElem{T,N})::Bool where {T,N} - return (a1.idxs == a2.idxs && a1.val == a2.val) -end - -function Base.isless(a1::ArrElem{T,N}, a2::ArrElem{T,N})::Bool where {T,N} - for n in 1:N - if a1.idxs[n] != a2.idxs[n] - return a1.idxs[n] < a2.idxs[n] - end - end - return a1.val < a2.val -end - -# -# Sparse finite state machine construction -# - -# allow sparse arrays with ITensors.Sum entries -function Base.zero(::Type{S}) where {S<:Sum} - return S() -end -Base.zero(t::Sum) = zero(typeof(t)) - -""" - finite_state_machine(os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V) where {C,V} - -Finite state machine generator for ITensors.OpSum Hamiltonian defined on a tree graph. The -site Index graph must be a tree graph, and the chosen root must be a leaf vertex of this -tree. Returns a DataGraph of SparseArrayKit.SparseArrays. 
-""" -function finite_state_machine( - os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V -) where {C,V} - os = deepcopy(os) - os = sorteachterm(os, sites, root_vertex) - os = ITensors.sortmergeterms(os) - - ValType = ITensors.determineValType(ITensors.terms(os)) - - # sparse symbolic representation of the TTN Hamiltonian as a DataGraph of SparseArrays - sparseTTN = DataGraph{V,SparseArray{Sum{Scaled{ValType,Prod{Op}}}}}( - underlying_graph(sites) - ) - - # traverse tree outwards from root vertex - vs = reverse(post_order_dfs_vertices(sites, root_vertex)) # store vertices in fixed ordering relative to root - es = reverse(reverse.(post_order_dfs_edges(sites, root_vertex))) # store edges in fixed ordering relative to root - # some things to keep track of - ranks = Dict(v => degree(sites, v) for v in vs) # rank of every TTN tensor in network - linkmaps = Dict(e => Dict{Prod{Op},Int}() for e in es) # map from term in Hamiltonian to edge channel index for every edge - site_coef_done = Prod{Op}[] # list of Hamiltonian terms for which the coefficient has been added to a site factor - edge_orders = DataGraph{V,Vector{edgetype(sites)}}(underlying_graph(sites)) # relate indices of sparse TTN tensor to incident graph edges for each site - - for v in vs - # collect all nontrivial entries of the TTN tensor at vertex v - entries = Tuple{MVector{ranks[v],Int},Scaled{ValType,Prod{Op}}}[] - - # for every vertex, find all edges that contain this vertex - edges = filter(e -> dst(e) == v || src(e) == v, es) - # use the corresponding ordering as index order for tensor elements at this site - edge_orders[v] = edges - dim_in = findfirst(e -> dst(e) == v, edges) - edge_in = (isnothing(dim_in) ? [] : edges[dim_in]) - dims_out = findall(e -> src(e) == v, edges) - edges_out = edges[dims_out] - - # sanity check, leaves only have single incoming or outgoing edge - @assert !isempty(dims_out) || !isnothing(dim_in) - (isempty(dims_out) || isnothing(dim_in)) && @assert is_leaf(sites, v) - - for term in os - # loop over OpSum and pick out terms that act on current vertex - crosses_vertex(term, sites, v) || continue - - # filter out factors that come in from the direction of the incoming edge - incoming = filter( - t -> edge_in ∈ edge_path(sites, ITensors.site(t), v), ITensors.terms(term) - ) - # filter out factor that acts on current vertex - onsite = filter(t -> (ITensors.site(t) == v), ITensors.terms(term)) - # for every outgoing edge, filter out factors that go out along that edge - outgoing = Dict( - e => filter(t -> e ∈ edge_path(sites, v, ITensors.site(t)), ITensors.terms(term)) - for e in edges_out - ) - - # translate into sparse tensor entry - T_inds = MVector{ranks[v]}(fill(-1, ranks[v])) - if !isnothing(dim_in) && !isempty(incoming) - T_inds[dim_in] = ITensors.posInLink!(linkmaps[edge_in], ITensors.argument(term)) # get incoming channel - end - for dout in dims_out - if !isempty(outgoing[edges[dout]]) - T_inds[dout] = ITensors.posInLink!(linkmaps[edges[dout]], ITensors.argument(term)) # add outgoing channel - end - end - # if term starts at this site, add its coefficient as a site factor - site_coef = one(C) - if (isnothing(dim_in) || T_inds[dim_in] == -1) && - ITensors.argument(term) ∉ site_coef_done - site_coef = ITensors.coefficient(term) - push!(site_coef_done, ITensors.argument(term)) - end - # add onsite identity for interactions passing through vertex - if isempty(onsite) - if !ITensors.using_auto_fermion() && isfermionic(incoming, sites) - error("No verified fermion support for automatic TTN 
constructor!") # no verified support, just throw error - else - push!(onsite, Op("Id", v)) - end - end - # save indices and value of sparse tensor entry - el = (T_inds, site_coef * Prod(onsite)) - push!(entries, el) - end - - # handle start and end of operator terms and convert to sparse array - linkdims = Tuple([ - (isempty(linkmaps[e]) ? 0 : maximum(values(linkmaps[e]))) + 2 for e in edges - ]) - T = SparseArray{Sum{Scaled{ValType,Prod{Op}}},ranks[v]}(undef, linkdims) - for (T_inds, t) in entries - if !isnothing(dim_in) - if T_inds[dim_in] == -1 - T_inds[dim_in] = 1 # always start in first channel - else - T_inds[dim_in] += 1 # shift regular channel - end - end - if !isempty(dims_out) - end_dims = filter(d -> T_inds[d] == -1, dims_out) - normal_dims = filter(d -> T_inds[d] != -1, dims_out) - T_inds[end_dims] .= linkdims[end_dims] # always end in last channel - T_inds[normal_dims] .+= 1 # shift regular channels - end - T[T_inds...] += t - end - # add starting and ending identity operators - if isnothing(dim_in) - T[ones(Int, ranks[v])...] += one(ValType) * Prod([Op("Id", v)]) # only one real starting identity - end - # ending identities are a little more involved - if !isnothing(dim_in) - T[linkdims...] += one(ValType) * Prod([Op("Id", v)]) # place identity if all channels end - # place identity from start of incoming channel to start of each single outgoing channel - idT_end_inds = [linkdims...] - idT_end_inds[dim_in] = 1 - for dout in dims_out - idT_end_inds[dout] = 1 - T[idT_end_inds...] += one(ValType) * Prod([Op("Id", v)]) - idT_end_inds[dout] = linkdims[dout] # reset - end - end - sparseTTN[v] = T - end - return sparseTTN, edge_orders -end - -""" - fsmTTN(os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V, kwargs...) where {C,V} - -Construct a dense TreeTensorNetwork from sparse finite state machine -represenatation, without compression. -""" -function fsmTTN( - os::OpSum{C}, sites::IndsNetwork{V,<:Index}, root_vertex::V; trunc=false, kwargs... -)::TTN where {C,V} - ValType = ITensors.determineValType(ITensors.terms(os)) - # start from finite state machine - fsm, edge_orders = finite_state_machine(os, sites, root_vertex) - # some trickery to get link dimension for every edge - link_space = Dict{edgetype(sites),Index}() - function get_linkind!(link_space, e) - if !haskey(link_space, e) - d = findfirst(x -> (x == e || x == reverse(e)), edge_orders[src(e)]) - link_space[e] = Index(size(fsm[src(e)], d), edge_tag(e)) - end - return link_space[e] - end - # compress finite state machine into dense form - H = TTN(sites) - for v in vertices(sites) - linkinds = [get_linkind!(link_space, e) for e in edge_orders[v]] - linkdims = dim.(linkinds) - H[v] = ITensor() - for (T_ind, t) in nonzero_pairs(fsm[v]) - any(map(x -> abs(coefficient(x)) > eps(), t)) || continue - T = zeros(ValType, linkdims...) - T[T_ind] += one(ValType) - T = itensor(T, linkinds) - H[v] += T * computeSiteSum(sites, t) - end - end - # add option for numerical truncation, but throw warning as this can fail sometimes - if trunc - @warn "Naive numerical truncation of TTN Hamiltonian may fail for larger systems." - # see https://github.com/ITensor/ITensors.jl/issues/526 - lognormT = lognorm(H) - H /= exp(lognormT / nv(H)) # TODO: fix broadcasting for in-place assignment - H = truncate(H; root_vertex, kwargs...) 
- H *= exp(lognormT / nv(H)) - end - return H -end - -function computeSiteSum( - sites::IndsNetwork{V,<:Index}, ops::Sum{Scaled{C,Prod{Op}}} -)::ITensor where {V,C} - ValType = ITensors.determineValType(ITensors.terms(ops)) - v = ITensors.site(ITensors.argument(ops[1])[1]) - T = - convert(ValType, coefficient(ops[1])) * - computeSiteProd(sites, ITensors.argument(ops[1])) - for j in 2:length(ops) - (ITensors.site(ITensors.argument(ops[j])[1]) != v) && - error("Mismatch of vertex labels in computeSiteSum") - T += - convert(ValType, coefficient(ops[j])) * - computeSiteProd(sites, ITensors.argument(ops[j])) - end - return T -end diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index dc99ca5e..b2c2a1f0 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -1,3 +1,5 @@ +using ITensors.ITensorMPS: ITensorMPS + # convert ITensors.OpSum to TreeTensorNetwork # @@ -41,7 +43,7 @@ Hamiltonian, compressing shared interaction channels. """ function ttn_svd(os::OpSum, sites::IndsNetwork, root_vertex; kwargs...) # Function barrier to improve type stability - coefficient_type = ITensors.determineValType(terms(os)) + coefficient_type = ITensorMPS.determineValType(terms(os)) return ttn_svd(coefficient_type, os, sites, root_vertex; kwargs...) end @@ -100,7 +102,7 @@ function ttn_svd( end end inbond_coefs = Dict( - e => Dict{QN,Vector{ITensors.MatElem{coefficient_type}}}() for e in es + e => Dict{QN,Vector{ITensorMPS.MatElem{coefficient_type}}}() for e in es ) # bond coefficients for incoming edge channels site_coef_done = Prod{Op}[] # list of terms for which the coefficient has been added to a site factor # temporary symbolic representation of TTN Hamiltonian @@ -158,13 +160,13 @@ function ttn_svd( coutmap = get!(outmaps, edge_in => not_incoming_qn, Dict{Vector{Op},Int}()) cinmap = get!(inmaps, edge_in => -incoming_qn, Dict{Vector{Op},Int}()) - bond_row = ITensors.posInLink!(cinmap, incoming) - bond_col = ITensors.posInLink!(coutmap, not_incoming) # get incoming channel + bond_row = ITensorMPS.posInLink!(cinmap, incoming) + bond_col = ITensorMPS.posInLink!(coutmap, not_incoming) # get incoming channel bond_coef = convert(coefficient_type, ITensors.coefficient(term)) q_inbond_coefs = get!( - inbond_coefs[edge_in], incoming_qn, ITensors.MatElem{coefficient_type}[] + inbond_coefs[edge_in], incoming_qn, ITensorMPS.MatElem{coefficient_type}[] ) - push!(q_inbond_coefs, ITensors.MatElem(bond_row, bond_col, bond_coef)) + push!(q_inbond_coefs, ITensorMPS.MatElem(bond_row, bond_col, bond_coef)) T_inds[dim_in] = bond_col T_qns[dim_in] = -incoming_qn end @@ -172,7 +174,7 @@ function ttn_svd( coutmap = get!( outmaps, edges[dout] => outgoing_qns[edges[dout]], Dict{Vector{Op},Int}() ) - T_inds[dout] = ITensors.posInLink!(coutmap, outgoing[edges[dout]]) # add outgoing channel + T_inds[dout] = ITensorMPS.posInLink!(coutmap, outgoing[edges[dout]]) # add outgoing channel T_qns[dout] = outgoing_qns[edges[dout]] end # if term starts at this site, add its coefficient as a site factor @@ -197,11 +199,11 @@ function ttn_svd( push!(tempTTN[v], el) end - ITensors.remove_dups!(tempTTN[v]) + ITensorMPS.remove_dups!(tempTTN[v]) # manual truncation: isometry on incoming edge if !isnothing(dim_in) && !isempty(inbond_coefs[edges[dim_in]]) for (q, mat) in inbond_coefs[edges[dim_in]] - M = ITensors.toMatrix(mat) + M = ITensorMPS.toMatrix(mat) U, S, V = svd(M) P = S .^ 2 truncate!(P; maxdim, cutoff, mindim) @@ -489,7 +491,7 @@ function TTN( os = 
deepcopy(os) os = sorteachterm(os, sites, root_vertex) - os = ITensors.sortmergeterms(os) # not exported + os = ITensorMPS.sortmergeterms(os) # not exported if algorithm == "svd" T = ttn_svd(os, sites, root_vertex; kwargs...) else diff --git a/src/treetensornetworks/solvers/deprecated/projmpo_apply.jl b/src/treetensornetworks/solvers/deprecated/projmpo_apply.jl index 7b73ea13..e69de29b 100644 --- a/src/treetensornetworks/solvers/deprecated/projmpo_apply.jl +++ b/src/treetensornetworks/solvers/deprecated/projmpo_apply.jl @@ -1,85 +0,0 @@ -import ITensors: AbstractProjMPO, makeL!, makeR! -import Base: copy - -""" -A ProjMPOApply represents the application of an -MPO `H` onto an MPS `psi0` but "projected" by -the basis of a different MPS `psi` (which -could be an approximation to H|psi>). - -As an implementation of the AbstractProjMPO -type, it supports multiple `nsite` values for -one- and two-site algorithms. - -``` - *--*--*- -*--*--*--*--*--* -``` -""" -mutable struct ProjMPOApply <: AbstractProjMPO - lpos::Int - rpos::Int - nsite::Int - psi0::MPS - H::MPO - LR::Vector{ITensor} -end - -function ProjMPOApply(psi0::MPS, H::MPO) - return ProjMPOApply(0, length(H) + 1, 2, psi0, H, Vector{ITensor}(undef, length(H))) -end - -function copy(P::ProjMPOApply) - return ProjMPOApply(P.lpos, P.rpos, P.nsite, copy(P.psi0), copy(P.H), copy(P.LR)) -end - -function makeL!(P::ProjMPOApply, psi::MPS, k::Int) - # Save the last `L` that is made to help with caching - # for DiskProjMPO - ll = P.lpos - if ll ≥ k - # Special case when nothing has to be done. - # Still need to change the position if lproj is - # being moved backward. - P.lpos = k - return nothing - end - # Make sure ll is at least 0 for the generic logic below - ll = max(ll, 0) - L = lproj(P) - while ll < k - L = L * P.psi0[ll + 1] * P.H[ll + 1] * dag(psi[ll + 1]) - P.LR[ll + 1] = L - ll += 1 - end - # Needed when moving lproj backward. - P.lpos = k - return P -end - -function makeR!(P::ProjMPOApply, psi::MPS, k::Int) - # Save the last `R` that is made to help with caching - # for DiskProjMPO - rl = P.rpos - if rl ≤ k - # Special case when nothing has to be done. - # Still need to change the position if rproj is - # being moved backward. 
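-    # The cached blocks in P.LR already cover position k, so only the stored
-    # position needs updating here.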
- P.rpos = k - return nothing - end - N = length(P.H) - # Make sure rl is no bigger than `N + 1` for the generic logic below - rl = min(rl, N + 1) - R = rproj(P) - while rl > k - R = R * P.psi0[rl - 1] * P.H[rl - 1] * dag(psi[rl - 1]) - P.LR[rl - 1] = R - rl -= 1 - end - P.rpos = k - return P -end diff --git a/src/treetensornetworks/solvers/deprecated/projmpo_mps2.jl b/src/treetensornetworks/solvers/deprecated/projmpo_mps2.jl index 7a619624..e69de29b 100644 --- a/src/treetensornetworks/solvers/deprecated/projmpo_mps2.jl +++ b/src/treetensornetworks/solvers/deprecated/projmpo_mps2.jl @@ -1,41 +0,0 @@ -import ITensors: AbstractProjMPO, makeL!, makeR!, contract, nsite -import Base: copy - -mutable struct ProjMPO_MPS2 <: AbstractProjMPO - PH::ProjMPO - Ms::Vector{ProjMPS2} -end - -function ProjMPO_MPS2(H::MPO, M::MPS) - return ProjMPO_MPS2(ProjMPO(H), [ProjMPS2(M)]) -end - -function ProjMPO_MPS2(H::MPO, Mv::Vector{MPS}) - return ProjMPO_MPS2(ProjMPO(H), [ProjMPS2(m) for m in Mv]) -end - -copy(P::ProjMPO_MPS2) = ProjMPO_MPS2(copy(P.PH), copy(P.Ms)) - -nsite(P::ProjMPO_MPS2) = nsite(P.PH) - -function makeL!(P::ProjMPO_MPS2, psi::MPS, k::Int) - makeL!(P.PH, psi, k) - for m in P.Ms - makeL!(m, psi, k) - end - return P -end - -function makeR!(P::ProjMPO_MPS2, psi::MPS, k::Int) - makeR!(P.PH, psi, k) - for m in P.Ms - makeR!(m, psi, k) - end - return P -end - -contract(P::ProjMPO_MPS2, v::ITensor) = contract(P.PH, v) - -proj_mps(P::ProjMPO_MPS2) = [proj_mps(m) for m in P.Ms] - -underlying_graph(P::ProjMPO_MPS2) = named_path_graph(length(P.PH.H)) # tree patch diff --git a/src/treetensornetworks/solvers/deprecated/projmps2.jl b/src/treetensornetworks/solvers/deprecated/projmps2.jl index d908943d..e69de29b 100644 --- a/src/treetensornetworks/solvers/deprecated/projmps2.jl +++ b/src/treetensornetworks/solvers/deprecated/projmps2.jl @@ -1,119 +0,0 @@ -import ITensors: AbstractProjMPO, makeL!, makeR!, contract, OneITensor, site_range -import Base: copy - -""" -Holds the following data where psi -is the MPS being optimized and M is the -MPS held constant by the ProjMPS. -``` - o--o--o--o--o--o--o--o--o--o--o -``` -""" -mutable struct ProjMPS2 <: AbstractProjMPO - lpos::Int - rpos::Int - nsite::Int - M::MPS - LR::Vector{ITensor} -end - -function ProjMPS2(M::MPS) - return ProjMPS2(0, length(M) + 1, 2, M, Vector{ITensor}(undef, length(M))) -end - -Base.length(P::ProjMPS2) = length(P.M) - -function copy(P::ProjMPS2) - return ProjMPS2(P.lpos, P.rpos, P.nsite, copy(P.M), copy(P.LR)) -end - -function makeL!(P::ProjMPS2, psi::MPS, k::Int) - # Save the last `L` that is made to help with caching - # for DiskProjMPO - ll = P.lpos - if ll ≥ k - # Special case when nothing has to be done. - # Still need to change the position if lproj is - # being moved backward. - P.lpos = k - return nothing - end - # Make sure ll is at least 0 for the generic logic below - ll = max(ll, 0) - L = lproj(P) - while ll < k - L = L * psi[ll + 1] * dag(prime(P.M[ll + 1], "Link")) - P.LR[ll + 1] = L - ll += 1 - end - # Needed when moving lproj backward. - P.lpos = k - return P -end - -function makeR!(P::ProjMPS2, psi::MPS, k::Int) - # Save the last `R` that is made to help with caching - # for DiskProjMPO - rl = P.rpos - if rl ≤ k - # Special case when nothing has to be done. - # Still need to change the position if rproj is - # being moved backward. 
- P.rpos = k - return nothing - end - N = length(P.M) - # Make sure rl is no bigger than `N + 1` for the generic logic below - rl = min(rl, N + 1) - R = rproj(P) - while rl > k - R = R * psi[rl - 1] * dag(prime(P.M[rl - 1], "Link")) - P.LR[rl - 1] = R - rl -= 1 - end - P.rpos = k - return P -end - -function contract(P::ProjMPS2, v::ITensor) - itensor_map = Union{ITensor,OneITensor}[lproj(P)] - append!(itensor_map, [prime(t, "Link") for t in P.M[site_range(P)]]) - push!(itensor_map, rproj(P)) - - # Reverse the contraction order of the map if - # the first tensor is a scalar (for example we - # are at the left edge of the system) - if dim(first(itensor_map)) == 1 - reverse!(itensor_map) - end - - # Apply the map - Mv = v - for it in itensor_map - Mv *= it - end - return Mv -end - -function proj_mps(P::ProjMPS2) - itensor_map = Union{ITensor,OneITensor}[lproj(P)] - append!(itensor_map, [dag(prime(t, "Link")) for t in P.M[site_range(P)]]) - push!(itensor_map, rproj(P)) - - # Reverse the contraction order of the map if - # the first tensor is a scalar (for example we - # are at the left edge of the system) - if dim(first(itensor_map)) == 1 - reverse!(itensor_map) - end - - # Apply the map - m = ITensor(1.0) - for it in itensor_map - #@show inds(it) - m *= it - end - return m -end
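For orientation after these deletions: the supported replacement for the removed `svdTTN`/`fsmTTN` constructors is the `TTN(os, sites)` entry point in `src/treetensornetworks/opsum_to_ttn.jl`, which (per the hunks above) now reaches helpers such as `determineValType`, `sortmergeterms`, and `posInLink!` through the `ITensors.ITensorMPS` submodule. A minimal usage sketch, assuming a spin-half chain on a named path graph:

```julia
using ITensors, ITensorNetworks
using Graphs: edges, src, dst
using NamedGraphs: named_path_graph

# Assumed setup: a 4-site path graph (a path is the simplest tree graph).
g = named_path_graph(4)
s = siteinds("S=1/2", g)

# Nearest-neighbor Ising couplings, written as an OpSum over the graph's edges.
os = OpSum()
for e in edges(g)
  os += "Sz", src(e), "Sz", dst(e)
end

H = TTN(os, s)  # sorts and merges the terms, then builds the network via ttn_svd
```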