diff --git a/Project.toml b/Project.toml index ccd05f2f..2ce895f0 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman and contributors"] -version = "0.6" +version = "0.7" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -56,14 +56,14 @@ GraphsFlows = "0.1.1" ITensors = "0.3.58" IsApprox = "0.1" IterTools = "1.4.0" -KrylovKit = "0.6.0" +KrylovKit = "0.6, 0.7" NamedGraphs = "0.1.23" Observers = "0.2" PackageExtensionCompat = "1" Requires = "1.3" SerializedElementArrays = "0.1" SimpleTraits = "0.9" -SparseArrayKit = "0.2.1" +SparseArrayKit = "0.2.1, 0.3" SplitApplyCombine = "1.2" StaticArrays = "1.5.12" StructWalk = "0.2" diff --git a/README.md b/README.md index 8a92276d..2f86bee0 100644 --- a/README.md +++ b/README.md @@ -105,13 +105,13 @@ and 4 edge(s): with vertex data: 4-element Dictionaries.Dictionary{Tuple{Int64, Int64}, Any} - (1, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=598|"1×1,1×2")) - (2, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=457|"2×1,2×2")) - (1, 2) │ ((dim=2|id=598|"1×1,1×2"), (dim=2|id=683|"1×2,2×2")) - (2, 2) │ ((dim=2|id=457|"2×1,2×2"), (dim=2|id=683|"1×2,2×2")) + (1, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=723|"1×1,1×2")) + (2, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=823|"2×1,2×2")) + (1, 2) │ ((dim=2|id=723|"1×1,1×2"), (dim=2|id=712|"1×2,2×2")) + (2, 2) │ ((dim=2|id=823|"2×1,2×2"), (dim=2|id=712|"1×2,2×2")) julia> tn[1, 1] -ITensor ord=2 (dim=2|id=712|"1×1,2×1") (dim=2|id=598|"1×1,1×2") +ITensor ord=2 (dim=2|id=74|"1×1,2×1") (dim=2|id=723|"1×1,1×2") NDTensors.EmptyStorage{NDTensors.EmptyNumber, NDTensors.Dense{NDTensors.EmptyNumber, Vector{NDTensors.EmptyNumber}}} julia> neighbors(tn, (1, 1)) @@ -135,8 +135,8 @@ and 1 edge(s): with vertex data: 2-element Dictionaries.Dictionary{Tuple{Int64, Int64}, Any} - (1, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=598|"1×1,1×2")) - (1, 2) │ ((dim=2|id=598|"1×1,1×2"), (dim=2|id=683|"1×2,2×2")) + (1, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=723|"1×1,1×2")) + (1, 2) │ ((dim=2|id=723|"1×1,1×2"), (dim=2|id=712|"1×2,2×2")) julia> tn_2 = subgraph(v -> v[1] == 2, tn) ITensorNetworks.ITensorNetwork{Tuple{Int64, Int64}} with 2 vertices: @@ -149,8 +149,8 @@ and 1 edge(s): with vertex data: 2-element Dictionaries.Dictionary{Tuple{Int64, Int64}, Any} - (2, 1) │ ((dim=2|id=712|"1×1,2×1"), (dim=2|id=457|"2×1,2×2")) - (2, 2) │ ((dim=2|id=457|"2×1,2×2"), (dim=2|id=683|"1×2,2×2")) + (2, 1) │ ((dim=2|id=74|"1×1,2×1"), (dim=2|id=823|"2×1,2×2")) + (2, 2) │ ((dim=2|id=823|"2×1,2×2"), (dim=2|id=712|"1×2,2×2")) ``` @@ -176,9 +176,9 @@ and 2 edge(s): with vertex data: 3-element Dictionaries.Dictionary{Int64, Vector{ITensors.Index}} - 1 │ ITensors.Index[(dim=2|id=830|"S=1/2,Site,n=1")] - 2 │ ITensors.Index[(dim=2|id=369|"S=1/2,Site,n=2")] - 3 │ ITensors.Index[(dim=2|id=558|"S=1/2,Site,n=3")] + 1 │ ITensors.Index[(dim=2|id=683|"S=1/2,Site,n=1")] + 2 │ ITensors.Index[(dim=2|id=123|"S=1/2,Site,n=2")] + 3 │ ITensors.Index[(dim=2|id=656|"S=1/2,Site,n=3")] and edge data: 0-element Dictionaries.Dictionary{NamedGraphs.NamedEdge{Int64}, Vector{ITensors.Index}} @@ -196,9 +196,9 @@ and 2 edge(s): with vertex data: 3-element Dictionaries.Dictionary{Int64, Any} - 1 │ ((dim=2|id=830|"S=1/2,Site,n=1"), (dim=2|id=186|"1,2")) - 2 │ ((dim=2|id=369|"S=1/2,Site,n=2"), (dim=2|id=186|"1,2"), (dim=2|id=430|"2,3… - 3 │ ((dim=2|id=558|"S=1/2,Site,n=3"), (dim=2|id=430|"2,3")) + 1 │ ((dim=2|id=683|"S=1/2,Site,n=1"), (dim=2|id=382|"1,2")) + 2 │ 
((dim=2|id=123|"S=1/2,Site,n=2"), (dim=2|id=382|"1,2"), (dim=2|id=190|"2,3… + 3 │ ((dim=2|id=656|"S=1/2,Site,n=3"), (dim=2|id=190|"2,3")) julia> tn2 = ITensorNetwork(s; link_space=2) ITensorNetworks.ITensorNetwork{Int64} with 3 vertices: @@ -213,9 +213,9 @@ and 2 edge(s): with vertex data: 3-element Dictionaries.Dictionary{Int64, Any} - 1 │ ((dim=2|id=830|"S=1/2,Site,n=1"), (dim=2|id=994|"1,2")) - 2 │ ((dim=2|id=369|"S=1/2,Site,n=2"), (dim=2|id=994|"1,2"), (dim=2|id=978|"2,3… - 3 │ ((dim=2|id=558|"S=1/2,Site,n=3"), (dim=2|id=978|"2,3")) + 1 │ ((dim=2|id=683|"S=1/2,Site,n=1"), (dim=2|id=934|"1,2")) + 2 │ ((dim=2|id=123|"S=1/2,Site,n=2"), (dim=2|id=934|"1,2"), (dim=2|id=614|"2,3… + 3 │ ((dim=2|id=656|"S=1/2,Site,n=3"), (dim=2|id=614|"2,3")) julia> @visualize tn1; ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ diff --git a/src/ModelNetworks/ModelNetworks.jl b/src/ModelNetworks/ModelNetworks.jl index d23d6e60..41fde3b1 100644 --- a/src/ModelNetworks/ModelNetworks.jl +++ b/src/ModelNetworks/ModelNetworks.jl @@ -1,6 +1,6 @@ module ModelNetworks using Graphs: degree, dst, edges, src -using ..ITensorNetworks: IndsNetwork, delta_network, insert_missing_internal_inds, itensor +using ..ITensorNetworks: IndsNetwork, delta_network, insert_linkinds, itensor using ITensors: commoninds, diagITensor, inds, noprime using LinearAlgebra: Diagonal, eigen using NamedGraphs: NamedGraph @@ -17,7 +17,7 @@ OPTIONAL ARGUMENT: function ising_network( eltype::Type, s::IndsNetwork, beta::Number; h::Number=0.0, szverts=nothing ) - s = insert_missing_internal_inds(s, edges(s); internal_inds_space=2) + s = insert_linkinds(s; link_space=2) tn = delta_network(eltype, s) if (szverts != nothing) for v in szverts diff --git a/src/abstractindsnetwork.jl b/src/abstractindsnetwork.jl index 6b06be6e..d87fefc4 100644 --- a/src/abstractindsnetwork.jl +++ b/src/abstractindsnetwork.jl @@ -23,13 +23,57 @@ end # TODO: Define a generic fallback for `AbstractDataGraph`? DataGraphs.edge_data_type(::Type{<:AbstractIndsNetwork{V,I}}) where {V,I} = Vector{I} +## TODO: Bring these back. +## function indsnetwork_getindex(is::AbstractIndsNetwork, index) +## return get(data_graph(is), index, indtype(is)[]) +## end +## +## function Base.getindex(is::AbstractIndsNetwork, index) +## return indsnetwork_getindex(is, index) +## end +## +## function Base.getindex(is::AbstractIndsNetwork, index::Pair) +## return indsnetwork_getindex(is, index) +## end +## +## function Base.getindex(is::AbstractIndsNetwork, index::AbstractEdge) +## return indsnetwork_getindex(is, index) +## end +## +## function indsnetwork_setindex!(is::AbstractIndsNetwork, value, index) +## data_graph(is)[index] = value +## return is +## end +## +## function Base.setindex!(is::AbstractIndsNetwork, value, index) +## indsnetwork_setindex!(is, value, index) +## return is +## end +## +## function Base.setindex!(is::AbstractIndsNetwork, value, index::Pair) +## indsnetwork_setindex!(is, value, index) +## return is +## end +## +## function Base.setindex!(is::AbstractIndsNetwork, value, index::AbstractEdge) +## indsnetwork_setindex!(is, value, index) +## return is +## end +## +## function Base.setindex!(is::AbstractIndsNetwork, value::Index, index) +## indsnetwork_setindex!(is, value, index) +## return is +## end + # # Index access # function ITensors.uniqueinds(is::AbstractIndsNetwork, edge::AbstractEdge) + # TODO: Replace with `is[v]` once `getindex(::IndsNetwork, ...)` is smarter. 
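+  # Start from the site indices of `src(edge)`, then union in the link indices
+  # of the other edges incident to `src(edge)` below, i.e. collect every index
+  # of `src(edge)` that is not shared with `edge`.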
inds = IndexSet(get(is, src(edge), Index[])) for ei in setdiff(incident_edges(is, src(edge)), [edge]) + # TODO: Replace with `is[v]` once `getindex(::IndsNetwork, ...)` is smarter. inds = unioninds(inds, get(is, ei, Index[])) end return inds @@ -39,8 +83,8 @@ function ITensors.uniqueinds(is::AbstractIndsNetwork, edge::Pair) return uniqueinds(is, edgetype(is)(edge)) end -function Base.union(tn1::AbstractIndsNetwork, tn2::AbstractIndsNetwork; kwargs...) - return IndsNetwork(union(data_graph(tn1), data_graph(tn2); kwargs...)) +function Base.union(is1::AbstractIndsNetwork, is2::AbstractIndsNetwork; kwargs...) + return IndsNetwork(union(data_graph(is1), data_graph(is2); kwargs...)) end function NamedGraphs.rename_vertices(f::Function, tn::AbstractIndsNetwork) @@ -51,31 +95,49 @@ end # Convenience functions # +function promote_indtypeof(is::AbstractIndsNetwork) + sitetype = mapreduce(promote_indtype, vertices(is); init=Index{Int}) do v + # TODO: Replace with `is[v]` once `getindex(::IndsNetwork, ...)` is smarter. + return mapreduce(typeof, promote_indtype, get(is, v, Index[]); init=Index{Int}) + end + linktype = mapreduce(promote_indtype, edges(is); init=Index{Int}) do e + # TODO: Replace with `is[e]` once `getindex(::IndsNetwork, ...)` is smarter. + return mapreduce(typeof, promote_indtype, get(is, e, Index[]); init=Index{Int}) + end + return promote_indtype(sitetype, linktype) +end + function union_all_inds(is_in::AbstractIndsNetwork...) @assert all(map(ug -> ug == underlying_graph(is_in[1]), underlying_graph.(is_in))) is_out = IndsNetwork(underlying_graph(is_in[1])) for v in vertices(is_out) + # TODO: Remove this check. if any(isassigned(is, v) for is in is_in) + # TODO: Change `get` to `getindex`. is_out[v] = unioninds([get(is, v, Index[]) for is in is_in]...) end end for e in edges(is_out) + # TODO: Remove this check. if any(isassigned(is, e) for is in is_in) + # TODO: Change `get` to `getindex`. is_out[e] = unioninds([get(is, e, Index[]) for is in is_in]...) end end return is_out end -function insert_missing_internal_inds( +function insert_linkinds( indsnetwork::AbstractIndsNetwork, edges=edges(indsnetwork); - internal_inds_space=trivial_space(indsnetwork), + link_space=trivial_space(indsnetwork), ) indsnetwork = copy(indsnetwork) for e in edges + # TODO: Change to check if it is empty. if !isassigned(indsnetwork, e) - iₑ = Index(internal_inds_space, edge_tag(e)) + iₑ = Index(link_space, edge_tag(e)) + # TODO: Allow setting with just `Index`. indsnetwork[e] = [iₑ] end end diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl index 2a6ed37b..e700ed6d 100644 --- a/src/abstractitensornetwork.jl +++ b/src/abstractitensornetwork.jl @@ -116,11 +116,12 @@ end # TODO: broadcasting function Base.union(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; kwargs...) - tn = ITensorNetwork(union(data_graph(tn1), data_graph(tn2)); kwargs...) + # TODO: Use a different constructor call here? + tn = _ITensorNetwork(union(data_graph(tn1), data_graph(tn2)); kwargs...) # Add any new edges that are introduced during the union for v1 in vertices(tn1) for v2 in vertices(tn2) - if hascommoninds(tn[v1], tn[v2]) + if hascommoninds(tn, v1 => v2) add_edge!(tn, v1 => v2) end end @@ -129,7 +130,8 @@ function Base.union(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; kw end function NamedGraphs.rename_vertices(f::Function, tn::AbstractITensorNetwork) - return ITensorNetwork(rename_vertices(f, data_graph(tn))) + # TODO: Use a different constructor call here? 
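+  # `_ITensorNetwork` is the package-internal inner constructor, which wraps
+  # the renamed data graph directly without changing the vertex type.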
+ return _ITensorNetwork(rename_vertices(f, data_graph(tn))) end # @@ -172,6 +174,8 @@ function Base.Vector{ITensor}(tn::AbstractITensorNetwork) end # Convenience wrapper +# TODO: Delete this and just use `Vector{ITensor}`, or maybe +# it should output a dictionary or be called `eachtensor`? itensors(tn::AbstractITensorNetwork) = Vector{ITensor}(tn) # @@ -182,10 +186,13 @@ function LinearAlgebra.promote_leaf_eltypes(tn::AbstractITensorNetwork) return LinearAlgebra.promote_leaf_eltypes(itensors(tn)) end -function trivial_space(tn::AbstractITensorNetwork) - return trivial_space(tn[first(vertices(tn))]) +function promote_indtypeof(tn::AbstractITensorNetwork) + return mapreduce(promote_indtype, vertices(tn)) do v + return indtype(tn[v]) + end end +# TODO: Delete in favor of `scalartype`. function ITensors.promote_itensor_eltype(tn::AbstractITensorNetwork) return LinearAlgebra.promote_leaf_eltypes(tn) end @@ -464,7 +471,6 @@ function NDTensors.contract( neighbors_src = setdiff(neighbors(tn, src(edge)), [dst(edge)]) neighbors_dst = setdiff(neighbors(tn, dst(edge)), [src(edge)]) new_itensor = tn[src(edge)] * tn[dst(edge)] - # The following is equivalent to: # # tn[dst(edge)] = new_itensor @@ -480,6 +486,7 @@ function NDTensors.contract( for n_dst in neighbors_dst add_edge!(tn, merged_vertex => n_dst) end + setindex_preserve_graph!(tn, new_itensor, merged_vertex) return tn @@ -736,7 +743,8 @@ function norm_network(tn::AbstractITensorNetwork) setindex_preserve_graph!(tndag, dag(tndag[v]), v) end tnket = rename_vertices(v -> (v, 2), data_graph(prime(tndag; sites=[]))) - tntn = ITensorNetwork(union(tnbra, tnket)) + # TODO: Use a different constructor here? + tntn = _ITensorNetwork(union(tnbra, tnket)) for v in vertices(tn) if !isempty(commoninds(tntn[(v, 1)], tntn[(v, 2)])) add_edge!(tntn, (v, 1) => (v, 2)) @@ -809,6 +817,9 @@ end Base.show(io::IO, graph::AbstractITensorNetwork) = show(io, MIME"text/plain"(), graph) +# TODO: Move to an `ITensorNetworksVisualizationInterfaceExt` +# package extension (and define a `VisualizationInterface` package +# based on `ITensorVisualizationCore`.). function ITensorVisualizationCore.visualize( tn::AbstractITensorNetwork, args...; @@ -865,13 +876,13 @@ function site_combiners(tn::AbstractITensorNetwork{V}) where {V} return Cs end -function insert_missing_internal_inds( - tn::AbstractITensorNetwork, edges; internal_inds_space=trivial_space(tn) +function insert_linkinds( + tn::AbstractITensorNetwork, edges=edges(tn); link_space=trivial_space(tn) ) tn = copy(tn) for e in edges - if !hascommoninds(tn[src(e)], tn[dst(e)]) - iₑ = Index(internal_inds_space, edge_tag(e)) + if !hascommoninds(tn, e) + iₑ = Index(link_space, edge_tag(e)) X = onehot(iₑ => 1) tn[src(e)] *= X tn[dst(e)] *= dag(X) @@ -880,12 +891,10 @@ function insert_missing_internal_inds( return tn end -function insert_missing_internal_inds( - tn::AbstractITensorNetwork; internal_inds_space=trivial_space(tn) -) - return insert_internal_inds(tn, edges(tn); internal_inds_space) -end - +# TODO: What to output? Could be an `IndsNetwork`. Or maybe +# that would be a different function `commonindsnetwork`. +# Even in that case, this could output a `Dictionary` +# from the edges to the common inds on that edge. 
function ITensors.commoninds(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork) inds = Index[] for v1 in vertices(tn1) @@ -911,8 +920,8 @@ function ITensorMPS.add(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork if !issetequal(edges_tn1, edges_tn2) new_edges = union(edges_tn1, edges_tn2) - tn1 = insert_missing_internal_inds(tn1, new_edges) - tn2 = insert_missing_internal_inds(tn2, new_edges) + tn1 = insert_linkinds(tn1, new_edges) + tn2 = insert_linkinds(tn2, new_edges) end edges_tn1, edges_tn2 = edges(tn1), edges(tn2) diff --git a/src/indsnetwork.jl b/src/indsnetwork.jl index 6ada1eda..311f6494 100644 --- a/src/indsnetwork.jl +++ b/src/indsnetwork.jl @@ -2,10 +2,10 @@ using DataGraphs: DataGraphs, DataGraph, IsUnderlyingGraph, map_data, vertex_dat using Dictionaries: AbstractDictionary, Indices using Graphs: Graphs using Graphs.SimpleGraphs: AbstractSimpleGraph -# using LinearAlgebra: I # Not sure if this is needed using ITensors: Index, dag using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize -using NamedGraphs: NamedGraphs, AbstractNamedGraph, NamedEdge, NamedGraph, vertextype +using NamedGraphs: + NamedGraphs, AbstractNamedGraph, NamedEdge, NamedGraph, named_path_graph, vertextype struct IndsNetwork{V,I} <: AbstractIndsNetwork{V,I} data_graph::DataGraph{V,Vector{I},Vector{I},NamedGraph{V},NamedEdge{V}} diff --git a/src/itensornetwork.jl b/src/itensornetwork.jl index 5e84138e..96f4b604 100644 --- a/src/itensornetwork.jl +++ b/src/itensornetwork.jl @@ -1,6 +1,6 @@ using DataGraphs: DataGraphs, DataGraph -using Dictionaries: dictionary -using ITensors: ITensor +using Dictionaries: Indices, dictionary +using ITensors: ITensors, ITensor, op, state using NamedGraphs: NamedGraphs, NamedEdge, NamedGraph, vertextype struct Private end @@ -10,8 +10,8 @@ struct Private end """ struct ITensorNetwork{V} <: AbstractITensorNetwork{V} data_graph::DataGraph{V,ITensor,ITensor,NamedGraph{V},NamedEdge{V}} - function ITensorNetwork{V}(::Private, data_graph::DataGraph) where {V} - return new{V}(data_graph) + global function _ITensorNetwork(data_graph::DataGraph) + return new{vertextype(data_graph)}(data_graph) end end @@ -26,106 +26,82 @@ function DataGraphs.underlying_graph_type(TN::Type{<:ITensorNetwork}) return fieldtype(data_graph_type(TN), :underlying_graph) end -function ITensorNetwork{V}(data_graph::DataGraph{V}) where {V} - return ITensorNetwork{V}(Private(), copy(data_graph)) +# Versions taking vertex types. +function ITensorNetwork{V}() where {V} + # TODO: Is there a better way to write this? + # Try using `convert_vertextype`. + return _ITensorNetwork(data_graph_type(ITensorNetwork{V})()) end -function ITensorNetwork{V}(data_graph::DataGraph) where {V} - return ITensorNetwork{V}(Private(), DataGraph{V}(data_graph)) +function ITensorNetwork{V}(tn::ITensorNetwork) where {V} + # TODO: Is there a better way to write this? + # Try using `convert_vertextype`. + return _ITensorNetwork(DataGraph{V}(data_graph(tn))) end - -ITensorNetwork(data_graph::DataGraph) = ITensorNetwork{vertextype(data_graph)}(data_graph) - -function ITensorNetwork{V}() where {V} - return ITensorNetwork{V}(data_graph_type(ITensorNetwork{V})()) +function ITensorNetwork{V}(g::NamedGraph) where {V} + # TODO: Is there a better way to write this? + # Try using `convert_vertextype`. + return ITensorNetwork(NamedGraph{V}(g)) end ITensorNetwork() = ITensorNetwork{Any}() # Conversion +# TODO: Copy or not? 
ITensorNetwork(tn::ITensorNetwork) = copy(tn) -ITensorNetwork{V}(tn::ITensorNetwork{V}) where {V} = copy(tn) -function ITensorNetwork{V}(tn::AbstractITensorNetwork) where {V} - return ITensorNetwork{V}(Private(), DataGraph{V}(data_graph(tn))) -end -ITensorNetwork(tn::AbstractITensorNetwork) = ITensorNetwork{vertextype(tn)}(tn) NamedGraphs.convert_vertextype(::Type{V}, tn::ITensorNetwork{V}) where {V} = tn NamedGraphs.convert_vertextype(V::Type, tn::ITensorNetwork) = ITensorNetwork{V}(tn) -Base.copy(tn::ITensorNetwork) = ITensorNetwork(copy(data_graph(tn))) +Base.copy(tn::ITensorNetwork) = _ITensorNetwork(copy(data_graph(tn))) # # Construction from collections of ITensors # -ITensorNetwork(vs::Vector, ts::Vector{ITensor}) = ITensorNetwork(Dictionary(vs, ts)) - -ITensorNetwork(ts::Vector{<:Pair{<:Any,ITensor}}) = ITensorNetwork(dictionary(ts)) - -function ITensorNetwork(ts::ITensorCollection) - return ITensorNetwork{keytype(ts)}(ts) -end - -function ITensorNetwork{V}(ts::ITensorCollection) where {V} - g = NamedGraph{V}(collect(eachindex(ts))) +function itensors_to_itensornetwork(ts) + g = NamedGraph(collect(eachindex(ts))) tn = ITensorNetwork(g) for v in vertices(g) tn[v] = ts[v] end return tn end - +function ITensorNetwork(ts::AbstractVector{ITensor}) + return itensors_to_itensornetwork(ts) +end +function ITensorNetwork(ts::AbstractDictionary{<:Any,ITensor}) + return itensors_to_itensornetwork(ts) +end +function ITensorNetwork(ts::AbstractDict{<:Any,ITensor}) + return itensors_to_itensornetwork(ts) +end +function ITensorNetwork(vs::AbstractVector, ts::AbstractVector{ITensor}) + return itensors_to_itensornetwork(Dictionary(vs, ts)) +end +function ITensorNetwork(ts::AbstractVector{<:Pair{<:Any,ITensor}}) + return itensors_to_itensornetwork(dictionary(ts)) +end +# TODO: Decide what this should do, maybe it should factorize? function ITensorNetwork(t::ITensor) - ts = ITensor[t] - return ITensorNetwork{keytype(ts)}(ts) + return itensors_to_itensornetwork([t]) end # # Construction from underlying named graph # -function ITensorNetwork{V}( - eltype::Type, undef::UndefInitializer, graph::AbstractNamedGraph; kwargs... -) where {V} - return ITensorNetwork{V}(eltype, undef, IndsNetwork{V}(graph; kwargs...)) -end - -function ITensorNetwork{V}(eltype::Type, graph::AbstractNamedGraph; kwargs...) where {V} - return ITensorNetwork{V}(eltype, IndsNetwork{V}(graph; kwargs...)) -end - -function ITensorNetwork{V}( - undef::UndefInitializer, graph::AbstractNamedGraph; kwargs... -) where {V} - return ITensorNetwork{V}(undef, IndsNetwork{V}(graph; kwargs...)) -end - -function ITensorNetwork{V}(graph::AbstractNamedGraph; kwargs...) where {V} - return ITensorNetwork{V}(IndsNetwork{V}(graph; kwargs...)) -end - function ITensorNetwork( eltype::Type, undef::UndefInitializer, graph::AbstractNamedGraph; kwargs... ) - return ITensorNetwork{vertextype(graph)}(eltype, undef, graph; kwargs...) + return ITensorNetwork(eltype, undef, IndsNetwork(graph; kwargs...)) end -function ITensorNetwork(eltype::Type, graph::AbstractNamedGraph; kwargs...) - return ITensorNetwork{vertextype(graph)}(eltype, graph; kwargs...) -end - -function ITensorNetwork(undef::UndefInitializer, graph::AbstractNamedGraph; kwargs...) - return ITensorNetwork{vertextype(graph)}(undef, graph; kwargs...) +function ITensorNetwork(f, graph::AbstractNamedGraph; kwargs...) + return ITensorNetwork(f, IndsNetwork(graph; kwargs...)) end function ITensorNetwork(graph::AbstractNamedGraph; kwargs...) - return ITensorNetwork{vertextype(graph)}(graph; kwargs...) 
-end - -function ITensorNetwork( - itensor_constructor::Function, underlying_graph::AbstractNamedGraph; kwargs... -) - return ITensorNetwork(itensor_constructor, IndsNetwork(underlying_graph; kwargs...)) + return ITensorNetwork(IndsNetwork(graph; kwargs...)) end # @@ -135,138 +111,124 @@ end function ITensorNetwork( eltype::Type, undef::UndefInitializer, graph::AbstractSimpleGraph; kwargs... ) - return ITensorNetwork(eltype, undef, NamedGraph(graph); kwargs...) -end - -function ITensorNetwork(eltype::Type, graph::AbstractSimpleGraph; kwargs...) - return ITensorNetwork(eltype, NamedGraph(graph); kwargs...) + return ITensorNetwork(eltype, undef, IndsNetwork(graph; kwargs...)) end -function ITensorNetwork(undef::UndefInitializer, graph::AbstractSimpleGraph; kwargs...) - return ITensorNetwork(undef, NamedGraph(graph); kwargs...) +function ITensorNetwork(f, graph::AbstractSimpleGraph; kwargs...) + return ITensorNetwork(f, IndsNetwork(graph); kwargs...) end function ITensorNetwork(graph::AbstractSimpleGraph; kwargs...) - return ITensorNetwork(NamedGraph(graph); kwargs...) -end - -function ITensorNetwork( - itensor_constructor::Function, underlying_graph::AbstractSimpleGraph; kwargs... -) - return ITensorNetwork(itensor_constructor, NamedGraph(underlying_graph); kwargs...) + return ITensorNetwork(IndsNetwork(graph); kwargs...) end # # Construction from IndsNetwork # -function ITensorNetwork{V}( - eltype::Type, undef::UndefInitializer, inds_network::IndsNetwork; kwargs... -) where {V} - return ITensorNetwork{V}(inds_network; kwargs...) do v, inds... - return ITensor(eltype, undef, inds...) +function ITensorNetwork(eltype::Type, undef::UndefInitializer, is::IndsNetwork; kwargs...) + return ITensorNetwork(is; kwargs...) do v + return (inds...) -> ITensor(eltype, undef, inds...) end end -function ITensorNetwork{V}(eltype::Type, inds_network::IndsNetwork; kwargs...) where {V} - return ITensorNetwork{V}(inds_network; kwargs...) do v, inds... - return ITensor(eltype, inds...) +function ITensorNetwork(eltype::Type, is::IndsNetwork; kwargs...) + return ITensorNetwork(is; kwargs...) do v + return (inds...) -> ITensor(eltype, inds...) end end -function ITensorNetwork{V}( - undef::UndefInitializer, inds_network::IndsNetwork; kwargs... -) where {V} - return ITensorNetwork{V}(inds_network; kwargs...) do v, inds... - return ITensor(undef, inds...) +function ITensorNetwork(undef::UndefInitializer, is::IndsNetwork; kwargs...) + return ITensorNetwork(is; kwargs...) do v + return (inds...) -> ITensor(undef, inds...) end end -function ITensorNetwork{V}(inds_network::IndsNetwork; kwargs...) where {V} - return ITensorNetwork{V}(inds_network; kwargs...) do v, inds... - return ITensor(inds...) +function ITensorNetwork(is::IndsNetwork; kwargs...) + return ITensorNetwork(is; kwargs...) do v + return (inds...) -> ITensor(inds...) end end -function ITensorNetwork{V}( - itensor_constructor::Function, inds_network::IndsNetwork; link_space=1, kwargs... -) where {V} - # Graphs.jl uses `zero` to create a graph of the same type - # without any vertices or edges. - inds_network_merge = typeof(inds_network)( - underlying_graph(inds_network); link_space, kwargs... 
- ) - inds_network = union(inds_network_merge, inds_network) - tn = ITensorNetwork{V}() - for v in vertices(inds_network) - add_vertex!(tn, v) - end - for e in edges(inds_network) - add_edge!(tn, e) - end - for v in vertices(tn) - siteinds = get(inds_network, v, indtype(inds_network)[]) - linkinds = [ - get(inds_network, edgetype(inds_network)(v, nv), indtype(inds_network)[]) for - nv in neighbors(inds_network, v) - ] - setindex_preserve_graph!(tn, itensor_constructor(v, siteinds, linkinds...), v) - end - return tn +# TODO: Handle `eltype` and `undef` through `generic_state`. +# `inds` are stored in a `NamedTuple` +function generic_state(f, inds::NamedTuple) + return generic_state(f, reduce(vcat, inds.linkinds; init=inds.siteinds)) end -function ITensorNetwork(inds_network::IndsNetwork; kwargs...) - return ITensorNetwork{vertextype(inds_network)}(inds_network; kwargs...) +function generic_state(f, inds::Vector) + return f(inds) end - -function ITensorNetwork( - eltype::Type, undef::UndefInitializer, inds_network::IndsNetwork; kwargs... -) - return ITensorNetwork{vertextype(inds_network)}(eltype, undef, inds_network; kwargs...) +function generic_state(a::AbstractArray, inds::Vector) + return itensor(a, inds) end - -function ITensorNetwork(eltype::Type, inds_network::IndsNetwork; kwargs...) - return ITensorNetwork{vertextype(inds_network)}(eltype, inds_network; kwargs...) +function generic_state(x::Op, inds::NamedTuple) + # TODO: Figure out what to do if there is more than one site. + @assert length(inds.siteinds) == 2 + i = inds.siteinds[findfirst(i -> plev(i) == 0, inds.siteinds)] + @assert i' ∈ inds.siteinds + site_tensors = [op(x.which_op, i)] + link_tensors = [[onehot(i => 1) for i in inds.linkinds[e]] for e in keys(inds.linkinds)] + return contract(reduce(vcat, link_tensors; init=site_tensors)) end - -function ITensorNetwork(undef::UndefInitializer, inds_network::IndsNetwork; kwargs...) - return ITensorNetwork{vertextype(inds_network)}(undef, inds_network; kwargs...) +function generic_state(s::AbstractString, inds::NamedTuple) + # TODO: Figure out what to do if there is more than one site. + site_tensors = [state(s, only(inds.siteinds))] + link_tensors = [[onehot(i => 1) for i in inds.linkinds[e]] for e in keys(inds.linkinds)] + return contract(reduce(vcat, link_tensors; init=site_tensors)) end -function ITensorNetwork(itensor_constructor::Function, inds_network::IndsNetwork; kwargs...) - return ITensorNetwork{vertextype(inds_network)}( - itensor_constructor, inds_network; kwargs... - ) +# TODO: This is similar to `ModelHamiltonians.to_callable`, +# try merging the two. +to_callable(value::Type) = value +to_callable(value::Function) = value +function to_callable(value::AbstractDict) + return Base.Fix1(getindex, value) ∘ keytype(value) end - -# TODO: Deprecate in favor of version above? Or use keyword argument? -# This can be handled by `ITensorNetwork((v, inds...) 
-> state(inds...), inds_network)` -function ITensorNetwork(eltype::Type, is::IndsNetwork, initstate::Function) - ψ = ITensorNetwork(eltype, is) - for v in vertices(ψ) - ψ[v] = convert_eltype(eltype, state(initstate(v), only(is[v]))) - end - ψ = insert_links(ψ, edges(is)) - return ψ +function to_callable(value::AbstractDictionary) + return Base.Fix1(getindex, value) ∘ keytype(value) end +function to_callable(value::AbstractArray{<:Any,N}) where {N} + return Base.Fix1(getindex, value) ∘ CartesianIndex +end +to_callable(value) = Returns(value) -function ITensorNetwork(eltype::Type, is::IndsNetwork, initstate::Union{String,Integer}) - return ITensorNetwork(eltype, is, v -> initstate) +function ITensorNetwork(value, is::IndsNetwork; kwargs...) + return ITensorNetwork(to_callable(value), is; kwargs...) end -function ITensorNetwork(is::IndsNetwork, initstate::Union{String,Integer,Function}) - return ITensorNetwork(Number, is, initstate) +function ITensorNetwork( + elt::Type, f, is::IndsNetwork; link_space=trivial_space(is), kwargs... +) + tn = ITensorNetwork(f, is; kwargs...) + for v in vertices(tn) + # TODO: Ideally we would use broadcasting, i.e. `elt.(tn[v])`, + # but that doesn't work right now on ITensors. + tn[v] = ITensors.convert_eltype(elt, tn[v]) + end + return tn end -function insert_links(ψ::ITensorNetwork, edges::Vector=edges(ψ); cutoff=1e-15) - for e in edges - # Define this to work? - # ψ = factorize(ψ, e; cutoff) - ψᵥ₁, ψᵥ₂ = factorize(ψ[src(e)] * ψ[dst(e)], inds(ψ[src(e)]); cutoff, tags=edge_tag(e)) - ψ[src(e)] = ψᵥ₁ - ψ[dst(e)] = ψᵥ₂ +function ITensorNetwork( + itensor_constructor::Function, is::IndsNetwork; link_space=trivial_space(is), kwargs... +) + is = insert_linkinds(is; link_space) + tn = ITensorNetwork{vertextype(is)}() + for v in vertices(is) + add_vertex!(tn, v) + end + for e in edges(is) + add_edge!(tn, e) + end + for v in vertices(tn) + # TODO: Replace with `is[v]` once `getindex(::IndsNetwork, ...)` is smarter. + siteinds = get(is, v, Index[]) + edges = [edgetype(is)(v, nv) for nv in neighbors(is, v)] + linkinds = map(e -> is[e], Indices(edges)) + tensor_v = generic_state(itensor_constructor(v), (; siteinds, linkinds)) + setindex_preserve_graph!(tn, tensor_v, v) end - return ψ + return tn end ITensorNetwork(itns::Vector{ITensorNetwork}) = reduce(⊗, itns) diff --git a/src/itensors.jl b/src/itensors.jl index f49bdb59..b47e4b0f 100644 --- a/src/itensors.jl +++ b/src/itensors.jl @@ -21,32 +21,55 @@ function tensor_sum(A::ITensor, B::ITensor) return A + B end -# TODO: Replace with a trait? -const ITensorCollection = Union{Vector{ITensor},Dictionary{<:Any,ITensor}} - # Patch for contraction sequences with `Key` # leaf values. # TODO: Move patch to `ITensors.jl`. ITensors._contract(As, index::Key) = As[index] -spacetype(::Type{Index}) = Int +indtype(a::ITensor) = promote_indtype(typeof.(inds(a))...) 
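+# `indtype` promotes the types of all of the indices of a tensor to a common
+# `Index` type, e.g. `Index{Int}` for dense indices.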
+ +spacetype(::Index{T}) where {T} = T spacetype(::Type{<:Index{T}}) where {T} = T -spacetype(T::Type{<:Vector}) = spacetype(eltype(T)) -trivial_space(::Type{<:Integer}) = 1 -trivial_space(::Type{<:Pair{QN}}) = (QN() => 1) -trivial_space(T::Type{<:Vector{<:Pair{QN}}}) = [trivial_space(eltype(T))] +function promote_indtype(is::Vararg{Type{<:Index}}) + return reduce(promote_indtype_rule, is; init=Index{Int}) +end + +function promote_spacetype_rule(type1::Type, type2::Type) + return error("Not implemented") +end + +function promote_spacetype_rule( + type1::Type{<:Integer}, type2::Type{<:Vector{<:Pair{QN,T2}}} +) where {T2<:Integer} + return Vector{Pair{QN,promote_type(type1, T2)}} +end + +function promote_spacetype_rule( + type1::Type{<:Vector{<:Pair{QN,<:Integer}}}, type2::Type{<:Integer} +) + return promote_spacetype_rule(type2, type1) +end + +function promote_spacetype_rule( + type1::Type{<:Vector{<:Pair{QN,T1}}}, type2::Type{<:Vector{<:Pair{QN,T2}}} +) where {T1<:Integer,T2<:Integer} + return Vector{Pair{QN,promote_type(T1, T2)}} +end -_trivial_space(T::Type) = trivial_space(spacetype(T)) -_trivial_space(x::Any) = trivial_space(typeof(x)) +function promote_spacetype_rule(type1::Type{<:Integer}, type2::Type{<:Integer}) + return promote_type(type1, type2) +end + +function promote_indtype_rule(type1::Type{<:Index}, type2::Type{<:Index}) + return Index{promote_spacetype_rule(spacetype(type1), spacetype(type2))} +end -trivial_space(T::Type{<:Index}) = _trivial_space(T) -trivial_space(T::Type{<:Vector}) = _trivial_space(T) +trivial_space(x) = trivial_space(promote_indtypeof(x)) +trivial_space(x::Type) = trivial_space(promote_indtype(x)) -trivial_space(x::Index) = _trivial_space(x) -trivial_space(x::Vector{<:Index}) = _trivial_space(x) -trivial_space(x::ITensor) = trivial_space(inds(x)) -trivial_space(x::Tuple{Vararg{Index}}) = trivial_space(first(x)) +trivial_space(i::Type{<:Index{<:Integer}}) = 1 +trivial_space(i::Type{<:Index{<:Vector{<:Pair{<:QN,<:Integer}}}}) = [QN() => 1] """ Given an input tensor and a Dict (ind_to_newind), replace inds of tensor that are also diff --git a/src/solvers/alternating_update/alternating_update.jl b/src/solvers/alternating_update/alternating_update.jl index c70c8433..8fae9532 100644 --- a/src/solvers/alternating_update/alternating_update.jl +++ b/src/solvers/alternating_update/alternating_update.jl @@ -100,9 +100,6 @@ function alternating_update( end function alternating_update(operator::AbstractTTN, init_state::AbstractTTN; kwargs...) - # Permute the indices to have a better memory layout - # and minimize permutations - operator = ITensors.permute(operator, (linkind, siteinds, linkind)) projected_operator = ProjTTN(operator) return alternating_update(projected_operator, init_state; kwargs...) end @@ -110,9 +107,6 @@ end function alternating_update( operator::AbstractTTN, init_state::AbstractTTN, sweep_plans; kwargs... ) - # Permute the indices to have a better memory layout - # and minimize permutations - operator = ITensors.permute(operator, (linkind, siteinds, linkind)) projected_operator = ProjTTN(operator) return alternating_update(projected_operator, init_state, sweep_plans; kwargs...) end @@ -140,7 +134,6 @@ Returns: function alternating_update( operators::Vector{<:AbstractTTN}, init_state::AbstractTTN; kwargs... ) - operators .= ITensors.permute.(operators, Ref((linkind, siteinds, linkind))) projected_operators = ProjTTNSum(operators) return alternating_update(projected_operators, init_state; kwargs...) 
end @@ -148,7 +141,6 @@ end function alternating_update( operators::Vector{<:AbstractTTN}, init_state::AbstractTTN, sweep_plans; kwargs... ) - operators .= ITensors.permute.(operators, Ref((linkind, siteinds, linkind))) projected_operators = ProjTTNSum(operators) return alternating_update(projected_operators, init_state, sweep_plans; kwargs...) end diff --git a/src/solvers/insert/insert.jl b/src/solvers/insert/insert.jl index e17ff39c..a0250c95 100644 --- a/src/solvers/insert/insert.jl +++ b/src/solvers/insert/insert.jl @@ -2,7 +2,7 @@ # are essentially inverse operations, adapted for different kinds of # algorithms and networks. -# sort of 2-site replacebond!; TODO: use dense TTN constructor instead +# TODO: use dense TTN constructor to make this more general. function default_inserter( state::AbstractTTN, phi::ITensor, @@ -27,9 +27,8 @@ function default_inserter( v = ortho_vert end state[v] = phi - state = set_ortho_center(state, [v]) - @assert isortho(state) && only(ortho_center(state)) == v - normalize && (state[v] ./= norm(state[v])) + state = set_ortho_region(state, [v]) + normalize && (state[v] /= norm(state[v])) return state, spec end @@ -46,6 +45,6 @@ function default_inserter( ) v = only(setdiff(support(region), [ortho])) state[v] *= phi - state = set_ortho_center(state, [v]) + state = set_ortho_region(state, [v]) return state, nothing end diff --git a/src/specialitensornetworks.jl b/src/specialitensornetworks.jl index 806c397f..d36d4778 100644 --- a/src/specialitensornetworks.jl +++ b/src/specialitensornetworks.jl @@ -7,43 +7,43 @@ using Distributions: Distribution RETURN A TENSOR NETWORK WITH COPY TENSORS ON EACH VERTEX. Note that passing a link_space will mean the indices of the resulting network don't match those of the input indsnetwork """ -function delta_network(eltype::Type, s::IndsNetwork; link_space=nothing) - return ITensorNetwork((v, inds...) -> delta(eltype, inds...), s; link_space) +function delta_network(eltype::Type, s::IndsNetwork; kwargs...) + return ITensorNetwork(s; kwargs...) do v + return inds -> delta(eltype, inds) + end end -function delta_network(s::IndsNetwork; link_space=nothing) - return delta_network(Float64, s; link_space) +function delta_network(s::IndsNetwork; kwargs...) + return delta_network(Float64, s; kwargs...) end -function delta_network(eltype::Type, graph::AbstractNamedGraph; link_space=nothing) - return delta_network(eltype, IndsNetwork(graph; link_space)) +function delta_network(eltype::Type, graph::AbstractNamedGraph; kwargs...) + return delta_network(eltype, IndsNetwork(graph; kwargs...)) end -function delta_network(graph::AbstractNamedGraph; link_space=nothing) - return delta_network(Float64, graph; link_space) +function delta_network(graph::AbstractNamedGraph; kwargs...) + return delta_network(Float64, graph; kwargs...) end """ Build an ITensor network on a graph specified by the inds network s. Bond_dim is given by link_space and entries are randomised (normal distribution, mean 0 std 1) """ -function random_tensornetwork(eltype::Type, s::IndsNetwork; link_space=nothing) - return ITensorNetwork(s; link_space) do v, inds... - itensor(randn(eltype, dim(inds)...), inds...) +function random_tensornetwork(eltype::Type, s::IndsNetwork; kwargs...) + return ITensorNetwork(s; kwargs...) do v + return inds -> itensor(randn(eltype, dim.(inds)...), inds) end end -function random_tensornetwork(s::IndsNetwork; link_space=nothing) - return random_tensornetwork(Float64, s; link_space) +function random_tensornetwork(s::IndsNetwork; kwargs...) 
+ return random_tensornetwork(Float64, s; kwargs...) end -@traitfn function random_tensornetwork( - eltype::Type, g::::IsUnderlyingGraph; link_space=nothing -) - return random_tensornetwork(eltype, IndsNetwork(g); link_space) +@traitfn function random_tensornetwork(eltype::Type, g::::IsUnderlyingGraph; kwargs...) + return random_tensornetwork(eltype, IndsNetwork(g); kwargs...) end -@traitfn function random_tensornetwork(g::::IsUnderlyingGraph; link_space=nothing) - return random_tensornetwork(Float64, IndsNetwork(g); link_space) +@traitfn function random_tensornetwork(g::::IsUnderlyingGraph; kwargs...) + return random_tensornetwork(Float64, IndsNetwork(g); kwargs...) end """ @@ -51,16 +51,14 @@ Build an ITensor network on a graph specified by the inds network s. Bond_dim is given by link_space and entries are randomized. The random distribution is based on the input argument `distribution`. """ -function random_tensornetwork( - distribution::Distribution, s::IndsNetwork; link_space=nothing -) - return ITensorNetwork(s; link_space) do v, inds... - itensor(rand(distribution, dim(inds)...), inds...) +function random_tensornetwork(distribution::Distribution, s::IndsNetwork; kwargs...) + return ITensorNetwork(s; kwargs...) do v + return inds -> itensor(rand(distribution, dim.(inds)...), inds) end end @traitfn function random_tensornetwork( - distribution::Distribution, g::::IsUnderlyingGraph; link_space=nothing + distribution::Distribution, g::::IsUnderlyingGraph; kwargs... ) - return random_tensornetwork(distribution, IndsNetwork(g); link_space) + return random_tensornetwork(distribution, IndsNetwork(g); kwargs...) end diff --git a/src/tebd.jl b/src/tebd.jl index 00fe5f35..edf5a188 100644 --- a/src/tebd.jl +++ b/src/tebd.jl @@ -19,7 +19,7 @@ function tebd( if step % print_frequency == 0 @show step, (step - 1) * Δβ, β end - ψ = insert_links(ψ) + ψ = insert_linkinds(ψ) ψ = apply(u⃗, ψ; cutoff, maxdim, normalize=true, ortho, kwargs...) if ortho for v in vertices(ψ) diff --git a/src/tensornetworkoperators.jl b/src/tensornetworkoperators.jl index 34c332dc..080c723b 100644 --- a/src/tensornetworkoperators.jl +++ b/src/tensornetworkoperators.jl @@ -10,7 +10,8 @@ function gate_group_to_tno(s::IndsNetwork, gates::Vector{ITensor}) #Construct indsnetwork for TNO s_O = union_all_inds(s, prime(s; links=[])) - O = delta_network(s_O) + # Make a TNO with `I` on every site. 
+ O = ITensorNetwork(Op("I"), s_O) for gate in gates v⃗ = vertices(s)[findall(i -> (length(commoninds(s[i], inds(gate))) != 0), vertices(s))] diff --git a/src/treetensornetworks/abstracttreetensornetwork.jl b/src/treetensornetworks/abstracttreetensornetwork.jl index 5bd3045c..35cbd128 100644 --- a/src/treetensornetworks/abstracttreetensornetwork.jl +++ b/src/treetensornetworks/abstracttreetensornetwork.jl @@ -1,8 +1,8 @@ using Graphs: has_vertex using NamedGraphs: edge_path, leaf_vertices, post_order_dfs_edges, post_order_dfs_vertices using IsApprox: IsApprox, Approx -using ITensors: directsum, hasinds, permute, plev -using ITensors.ITensorMPS: isortho, linkind, loginner, lognorm, orthogonalize +using ITensors: @Algorithm_str, directsum, hasinds, permute, plev +using ITensors.ITensorMPS: linkind, loginner, lognorm, orthogonalize using TupleTools: TupleTools abstract type AbstractTreeTensorNetwork{V} <: AbstractITensorNetwork{V} end @@ -17,8 +17,8 @@ end # Field access # -ITensorNetwork(ψ::AbstractTTN) = ψ.itensor_network -ortho_center(ψ::AbstractTTN) = ψ.ortho_center +ITensorNetwork(tn::AbstractTTN) = error("Not implemented") +ortho_region(tn::AbstractTTN) = error("Not implemented") function default_root_vertex(gs::AbstractGraph...) # @assert all(is_tree.(gs)) @@ -29,29 +29,28 @@ end # Orthogonality center # -ITensorMPS.isortho(ψ::AbstractTTN) = isone(length(ortho_center(ψ))) - -function set_ortho_center(ψ::AbstractTTN{V}, new_center::Vector{<:V}) where {V} - return typeof(ψ)(itensor_network(ψ), new_center) +function set_ortho_region(tn::AbstractTTN, new_region) + return error("Not implemented") end -reset_ortho_center(ψ::AbstractTTN) = set_ortho_center(ψ, vertices(ψ)) - # # Orthogonalization # -function ITensorMPS.orthogonalize(ψ::AbstractTTN{V}, root_vertex::V; kwargs...) where {V} - (isortho(ψ) && only(ortho_center(ψ)) == root_vertex) && return ψ - if isortho(ψ) - edge_list = edge_path(ψ, only(ortho_center(ψ)), root_vertex) +function ITensorMPS.orthogonalize(tn::AbstractTTN, ortho_center; kwargs...) + if isone(length(ortho_region(tn))) && ortho_center == only(ortho_region(tn)) + return tn + end + # TODO: Rewrite this in a more general way. + if isone(length(ortho_region(tn))) + edge_list = edge_path(tn, only(ortho_region(tn)), ortho_center) else - edge_list = post_order_dfs_edges(ψ, root_vertex) + edge_list = post_order_dfs_edges(tn, ortho_center) end for e in edge_list - ψ = orthogonalize(ψ, e) + tn = orthogonalize(tn, e) end - return set_ortho_center(ψ, [root_vertex]) + return set_ortho_region(tn, [ortho_center]) end # For ambiguity error @@ -64,14 +63,14 @@ end # Truncation # -function Base.truncate(ψ::AbstractTTN; root_vertex=default_root_vertex(ψ), kwargs...) - for e in post_order_dfs_edges(ψ, root_vertex) +function Base.truncate(tn::AbstractTTN; root_vertex=default_root_vertex(tn), kwargs...) + for e in post_order_dfs_edges(tn, root_vertex) # always orthogonalize towards source first to make truncations controlled - ψ = orthogonalize(ψ, src(e)) - ψ = truncate(ψ, e; kwargs...) - ψ = set_ortho_center(ψ, [dst(e)]) + tn = orthogonalize(tn, src(e)) + tn = truncate(tn, e; kwargs...) + tn = set_ortho_region(tn, [dst(e)]) end - return ψ + return tn end # For ambiguity error @@ -84,127 +83,125 @@ end # # TODO: decide on contraction order: reverse dfs vertices or forward dfs edges? -function NDTensors.contract( - ψ::AbstractTTN{V}, root_vertex::V=default_root_vertex(ψ); kwargs... 
-) where {V} - ψ = copy(ψ) +function NDTensors.contract(tn::AbstractTTN, root_vertex=default_root_vertex(tn); kwargs...) + tn = copy(tn) # reverse post order vertices - traversal_order = reverse(post_order_dfs_vertices(ψ, root_vertex)) - return contract(ITensorNetwork(ψ); sequence=traversal_order, kwargs...) + traversal_order = reverse(post_order_dfs_vertices(tn, root_vertex)) + return contract(ITensorNetwork(tn); sequence=traversal_order, kwargs...) # # forward post order edges - # ψ = copy(ψ) - # for e in post_order_dfs_edges(ψ, root_vertex) - # ψ = contract(ψ, e) + # tn = copy(tn) + # for e in post_order_dfs_edges(tn, root_vertex) + # tn = contract(tn, e) # end - # return ψ[root_vertex] + # return tn[root_vertex] end function ITensors.inner( - ϕ::AbstractTTN, ψ::AbstractTTN; root_vertex=default_root_vertex(ϕ, ψ) + x::AbstractTTN, y::AbstractTTN; root_vertex=default_root_vertex(x, y) ) - ϕᴴ = sim(dag(ϕ); sites=[]) - ψ = sim(ψ; sites=[]) - ϕψ = ϕᴴ ⊗ ψ + xᴴ = sim(dag(x); sites=[]) + y = sim(y; sites=[]) + xy = xᴴ ⊗ y # TODO: find the largest tensor and use it as # the `root_vertex`. - for e in post_order_dfs_edges(ψ, root_vertex) - if has_vertex(ϕψ, (src(e), 2)) - ϕψ = contract(ϕψ, (src(e), 2) => (src(e), 1)) + for e in post_order_dfs_edges(y, root_vertex) + if has_vertex(xy, (src(e), 2)) + xy = contract(xy, (src(e), 2) => (src(e), 1)) end - ϕψ = contract(ϕψ, (src(e), 1) => (dst(e), 1)) - if has_vertex(ϕψ, (dst(e), 2)) - ϕψ = contract(ϕψ, (dst(e), 2) => (dst(e), 1)) + xy = contract(xy, (src(e), 1) => (dst(e), 1)) + if has_vertex(xy, (dst(e), 2)) + xy = contract(xy, (dst(e), 2) => (dst(e), 1)) end end - return ϕψ[root_vertex, 1][] + return xy[root_vertex, 1][] end -function LinearAlgebra.norm(ψ::AbstractTTN) - if isortho(ψ) - return norm(ψ[only(ortho_center(ψ))]) +function LinearAlgebra.norm(tn::AbstractTTN) + if isone(length(ortho_region(tn))) + return norm(tn[only(ortho_region(tn))]) end - return √(abs(real(inner(ψ, ψ)))) + return √(abs(real(inner(tn, tn)))) end # # Utility # -function LinearAlgebra.normalize!(ψ::AbstractTTN) - c = ortho_center(ψ) - lognorm_ψ = lognorm(ψ) - if lognorm_ψ == -Inf - return ψ +function LinearAlgebra.normalize!(tn::AbstractTTN) + c = ortho_region(tn) + lognorm_tn = lognorm(tn) + if lognorm_tn == -Inf + return tn end - z = exp(lognorm_ψ / length(c)) + z = exp(lognorm_tn / length(c)) for v in c - ψ[v] ./= z + tn[v] ./= z end - return ψ + return tn end -function LinearAlgebra.normalize(ψ::AbstractTTN) - return normalize!(copy(ψ)) +function LinearAlgebra.normalize(tn::AbstractTTN) + return normalize!(copy(tn)) end -function _apply_to_orthocenter!(f, ψ::AbstractTTN, x) - v = first(ortho_center(ψ)) - ψ[v] = f(ψ[v], x) - return ψ +function _apply_to_ortho_region!(f, tn::AbstractTTN, x) + v = first(ortho_region(tn)) + tn[v] = f(tn[v], x) + return tn end -function _apply_to_orthocenter(f, ψ::AbstractTTN, x) - return _apply_to_orthocenter!(f, copy(ψ), x) +function _apply_to_ortho_region(f, tn::AbstractTTN, x) + return _apply_to_ortho_region!(f, copy(tn), x) end -Base.:*(ψ::AbstractTTN, α::Number) = _apply_to_orthocenter(*, ψ, α) +Base.:*(tn::AbstractTTN, α::Number) = _apply_to_ortho_region(*, tn, α) -Base.:*(α::Number, ψ::AbstractTTN) = ψ * α +Base.:*(α::Number, tn::AbstractTTN) = tn * α -Base.:/(ψ::AbstractTTN, α::Number) = _apply_to_orthocenter(/, ψ, α) +Base.:/(tn::AbstractTTN, α::Number) = _apply_to_ortho_region(/, tn, α) -Base.:-(ψ::AbstractTTN) = -1 * ψ +Base.:-(tn::AbstractTTN) = -1 * tn -function LinearAlgebra.rmul!(ψ::AbstractTTN, α::Number) - return 
_apply_to_orthocenter!(*, ψ, α) +function LinearAlgebra.rmul!(tn::AbstractTTN, α::Number) + return _apply_to_ortho_region!(*, tn, α) end -function ITensorMPS.lognorm(ψ::AbstractTTN) - if isortho(ψ) - return log(norm(ψ[only(ortho_center(ψ))])) +function ITensorMPS.lognorm(tn::AbstractTTN) + if isone(length(ortho_region(tn))) + return log(norm(tn[only(ortho_region(tn))])) end - lognorm2_ψ = loginner(ψ, ψ) - rtol = eps(real(scalartype(ψ))) * 10 + lognorm2_tn = loginner(tn, tn) + rtol = eps(real(scalartype(tn))) * 10 atol = rtol - if !IsApprox.isreal(lognorm2_ψ, Approx(; rtol=rtol, atol=atol)) + if !IsApprox.isreal(lognorm2_tn, Approx(; rtol=rtol, atol=atol)) @warn "log(norm²) is $lognorm2_tn, which is not real up to a relative tolerance of $rtol and an absolute tolerance of $atol. Taking the real part, which may not be accurate." - return 0.5 * real(lognorm2_ψ) + return 0.5 * real(lognorm2_tn) end -function logdot(ψ1::TTNT, ψ2::TTNT; kwargs...) where {TTNT<:AbstractTTN} - return loginner(ψ1, ψ2; kwargs...) +function logdot(tn1::AbstractTTN, tn2::AbstractTTN; kwargs...) + return loginner(tn1, tn2; kwargs...) end # TODO: stick with this traversal or find optimal contraction sequence? function ITensorMPS.loginner( - ψ1::TTNT, ψ2::TTNT; root_vertex=default_root_vertex(ψ1, ψ2) -)::Number where {TTNT<:AbstractTTN} - N = nv(ψ1) - if nv(ψ2) != N - throw(DimensionMismatch("inner: mismatched number of vertices $N and $(nv(ψ2))")) + tn1::AbstractTTN, tn2::AbstractTTN; root_vertex=default_root_vertex(tn1, tn2) +) + N = nv(tn1) + if nv(tn2) != N + throw(DimensionMismatch("inner: mismatched number of vertices $N and $(nv(tn2))")) end - ψ1dag = sim(dag(ψ1); sites=[]) - traversal_order = reverse(post_order_dfs_vertices(ψ1, root_vertex)) + tn1dag = sim(dag(tn1); sites=[]) + traversal_order = reverse(post_order_dfs_vertices(tn1, root_vertex)) - O = ψ1dag[root_vertex] * ψ2[root_vertex] + O = tn1dag[root_vertex] * tn2[root_vertex] normO = norm(O) log_inner_tot = log(normO) O ./= normO for v in traversal_order[2:end] - O = (O * ψ1dag[v]) * ψ2[v] + O = (O * tn1dag[v]) * tn2[v] normO = norm(O) log_inner_tot += log(normO) O ./= normO @@ -216,10 +213,10 @@ function ITensorMPS.loginner( return log_inner_tot end -function _add_maxlinkdims(ψs::AbstractTTN...) - maxdims = Dictionary{edgetype(ψs[1]),Int}() - for e in edges(ψs[1]) - maxdims[e] = sum(ψ -> linkdim(ψ, e), ψs) +function _add_maxlinkdims(tns::AbstractTTN...) + maxdims = Dictionary{edgetype(tns[1]),Int}() + for e in edges(tns[1]) + maxdims[e] = sum(tn -> linkdim(tn, e), tns) maxdims[reverse(e)] = maxdims[e] end return maxdims @@ -227,79 +224,63 @@ end # TODO: actually implement this? function Base.:+( - ::ITensors.Algorithm"densitymatrix", - ψs::TTNT...; + ::Algorithm"densitymatrix", + tns::AbstractTTN...; cutoff=1e-15, - root_vertex=default_root_vertex(ψs...), + root_vertex=default_root_vertex(tns...), kwargs..., -) where {TTNT<:AbstractTTN} +) return error("Not implemented (yet) for trees.") end function Base.:+( - ::ITensors.Algorithm"directsum", ψs::TTNT...; root_vertex=default_root_vertex(ψs...) -) where {TTNT<:AbstractTTN} - @assert all(ψ -> nv(first(ψs)) == nv(ψ), ψs) + ::Algorithm"directsum", tns::AbstractTTN...; root_vertex=default_root_vertex(tns...) 
+) + @assert all(tn -> nv(first(tns)) == nv(tn), tns) # Output state - ϕ = ttn(siteinds(ψs[1])) + tn = ttn(siteinds(tns[1])) - vs = post_order_dfs_vertices(ϕ, root_vertex) - es = post_order_dfs_edges(ϕ, root_vertex) - link_space = Dict{edgetype(ϕ),Index}() + vs = post_order_dfs_vertices(tn, root_vertex) + es = post_order_dfs_edges(tn, root_vertex) + link_space = Dict{edgetype(tn),Index}() for v in reverse(vs) edges = filter(e -> dst(e) == v || src(e) == v, es) dims_in = findall(e -> dst(e) == v, edges) dim_out = findfirst(e -> src(e) == v, edges) - - ls = [Tuple(only(linkinds(ψ, e)) for e in edges) for ψ in ψs] - ϕv, lv = directsum((ψs[i][v] => ls[i] for i in 1:length(ψs))...; tags=tags.(first(ls))) + ls = [Tuple(only(linkinds(tn, e)) for e in edges) for tn in tns] + tnv, lv = directsum( + (tns[i][v] => ls[i] for i in 1:length(tns))...; tags=tags.(first(ls)) + ) for din in dims_in link_space[edges[din]] = lv[din] end if !isnothing(dim_out) - ϕv = replaceind(ϕv, lv[dim_out] => dag(link_space[edges[dim_out]])) + tnv = replaceind(tnv, lv[dim_out] => dag(link_space[edges[dim_out]])) end - - ϕ[v] = ϕv + tn[v] = tnv end - return convert(TTNT, ϕ) + return tn end # TODO: switch default algorithm once more are implemented -function Base.:+(ψs::AbstractTTN...; alg=ITensors.Algorithm"directsum"(), kwargs...) - return +(ITensors.Algorithm(alg), ψs...; kwargs...) +function Base.:+(tns::AbstractTTN...; alg=Algorithm"directsum"(), kwargs...) + return +(Algorithm(alg), tns...; kwargs...) end -Base.:+(ψ::AbstractTTN) = ψ +Base.:+(tn::AbstractTTN) = tn -ITensors.add(ψs::AbstractTTN...; kwargs...) = +(ψs...; kwargs...) +ITensors.add(tns::AbstractTTN...; kwargs...) = +(tns...; kwargs...) -function Base.:-(ψ1::AbstractTTN, ψ2::AbstractTTN; kwargs...) - return +(ψ1, -ψ2; kwargs...) +function Base.:-(tn1::AbstractTTN, tn2::AbstractTTN; kwargs...) + return +(tn1, -tn2; kwargs...) end function ITensors.add(tn1::AbstractTTN, tn2::AbstractTTN; kwargs...) return +(tn1, tn2; kwargs...) end -# TODO: Delete this -function ITensors.permute( - ψ::AbstractTTN, ::Tuple{typeof(linkind),typeof(siteinds),typeof(linkind)} -) - ψ̃ = copy(ψ) - for v in vertices(ψ) - ls = [only(linkinds(ψ, n => v)) for n in neighbors(ψ, v)] # TODO: won't work for multiple indices per link... - ss = TupleTools.sort(Tuple(siteinds(ψ, v)); by=plev) - setindex_preserve_graph!( - ψ̃, permute(ψ[v], filter(!isnothing, (ls[1], ss..., ls[2:end]...))), v - ) - end - ψ̃ = set_ortho_center(ψ̃, ortho_center(ψ)) - return ψ̃ -end - function Base.isapprox( x::AbstractTTN, y::AbstractTTN; @@ -372,9 +353,8 @@ function ITensorMPS.expect( # ToDo: verify that this is a sane default root_vertex=default_root_vertex(siteinds(state)), ) - # ToDo: for performance it may be beneficial to also implement expect! and change the orthogonality center in place - # assuming that the next algorithmic step can make use of the orthogonality center being moved to a different vertex - # ToDo: Verify that this is indeed the correct order for performance + # TODO: Optimize this with proper caching. 
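+  # Normalize the state up front so the local expectation values computed
+  # below do not need to be divided by `⟨state|state⟩`.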
+ state /= norm(state) sites = siteinds(state) ordered_vertices = reverse(post_order_dfs_vertices(sites, root_vertex)) res = Dictionary(vertices, undef) diff --git a/src/treetensornetworks/opsum_to_ttn.jl b/src/treetensornetworks/opsum_to_ttn.jl index 4ffd1743..9b555547 100644 --- a/src/treetensornetworks/opsum_to_ttn.jl +++ b/src/treetensornetworks/opsum_to_ttn.jl @@ -1,27 +1,10 @@ -using Graphs: degree -using Graphs: is_tree -using ITensors: flux -using ITensors: has_fermion_string -using ITensors: itensor -using ITensors: ops -using ITensors: removeqns -using ITensors: space -using ITensors: val -using ITensors.ITensorMPS: ITensorMPS -using ITensors.ITensorMPS: cutoff -using ITensors.ITensorMPS: linkdims -using ITensors.LazyApply: coefficient -using ITensors.LazyApply: Sum -using ITensors.LazyApply: Prod -using ITensors.NDTensors: Block -using ITensors.NDTensors: maxdim -using ITensors.NDTensors: nblocks -using ITensors.NDTensors: nnzblocks -using ITensors.Ops: OpSum -using ITensors.Ops: Op -using NamedGraphs: degrees -using NamedGraphs: is_leaf -using NamedGraphs: vertex_path +using Graphs: degree, is_tree +using ITensors: flux, has_fermion_string, itensor, ops, removeqns, space, val +using ITensors.ITensorMPS: ITensorMPS, cutoff, linkdims, truncate! +using ITensors.LazyApply: Prod, Sum, coefficient +using ITensors.NDTensors: Block, maxdim, nblocks, nnzblocks +using ITensors.Ops: Op, OpSum +using NamedGraphs: degrees, is_leaf, vertex_path using StaticArrays: MVector # convert ITensors.OpSum to TreeTensorNetwork diff --git a/src/treetensornetworks/ttn.jl b/src/treetensornetworks/ttn.jl index 63afbc18..31bc9770 100644 --- a/src/treetensornetworks/ttn.jl +++ b/src/treetensornetworks/ttn.jl @@ -1,223 +1,110 @@ -using ITensors.ITensorMPS: randomMPS, replacebond! -using ITensors.NDTensors: truncate! +using Graphs: path_graph +using ITensors: ITensor using LinearAlgebra: normalize -using NamedGraphs: named_path_graph -using Random: randn! +using NamedGraphs: vertextype """ TreeTensorNetwork{V} <: AbstractTreeTensorNetwork{V} - -# Fields - -- itensor_network::ITensorNetwork{V} -- ortho_lims::Vector{V}: A vector of vertices defining the orthogonality limits. 
- """ struct TreeTensorNetwork{V} <: AbstractTreeTensorNetwork{V} - itensor_network::ITensorNetwork{V} - ortho_center::Vector{V} - function TreeTensorNetwork{V}( - itensor_network::ITensorNetwork, ortho_center::Vector=vertices(itensor_network) - ) where {V} - @assert is_tree(itensor_network) - return new{V}(itensor_network, ortho_center) + tensornetwork::ITensorNetwork{V} + ortho_region::Vector{V} + global function _TreeTensorNetwork(tensornetwork::ITensorNetwork, ortho_region) + @assert is_tree(tensornetwork) + return new{vertextype(tensornetwork)}(tensornetwork, ortho_region) + end + global function _TreeTensorNetwork(tensornetwork::ITensorNetwork) + return _TreeTensorNetwork(tensornetwork, vertices(tensornetwork)) end end -const TTN = TreeTensorNetwork - -function data_graph_type(G::Type{<:TTN}) - return data_graph_type(fieldtype(G, :itensor_network)) +function TreeTensorNetwork(tn::ITensorNetwork; ortho_region=vertices(tn)) + return _TreeTensorNetwork(tn, ortho_region) end - -function Base.copy(ψ::TTN) - return ttn(copy(ψ.itensor_network), copy(ψ.ortho_center)) +function TreeTensorNetwork{V}(tn::ITensorNetwork) where {V} + return TreeTensorNetwork(ITensorNetwork{V}(tn)) end +const TTN = TreeTensorNetwork + # Field access -itensor_network(ψ::TTN) = getfield(ψ, :itensor_network) +ITensorNetwork(tn::TTN) = getfield(tn, :tensornetwork) +ortho_region(tn::TTN) = getfield(tn, :ortho_region) # Required for `AbstractITensorNetwork` interface -data_graph(ψ::TTN) = data_graph(itensor_network(ψ)) - -# -# Constructor -# +data_graph(tn::TTN) = data_graph(ITensorNetwork(tn)) -ttn(tn::ITensorNetwork, args...) = TTN{vertextype(tn)}(tn, args...) - -# catch-all for default ElType -function ttn(g::AbstractGraph, args...; kwargs...) - return ttn(Float64, g, args...; kwargs...) -end - -function ttn(eltype::Type{<:Number}, graph::AbstractGraph, args...; kwargs...) - itensor_network = ITensorNetwork(eltype, graph; kwargs...) - return ttn(itensor_network, args...) +function data_graph_type(G::Type{<:TTN}) + return data_graph_type(fieldtype(G, :tensornetwork)) end -# construct from given state (map) -function ttn(::Type{ElT}, is::AbstractIndsNetwork, initstate, args...) where {ElT<:Number} - itensor_network = ITensorNetwork(ElT, is, initstate) - return ttn(itensor_network, args...) +function Base.copy(tn::TTN) + return _TreeTensorNetwork(copy(ITensorNetwork(tn)), copy(ortho_region(tn))) end -# Constructor from a collection of ITensors. -# TODO: Support other collections like `Dictionary`, -# interface for custom vertex names. -function ttn(ts::ITensorCollection) - return ttn(ITensorNetwork(ts)) -end +# +# Constructor +# -# TODO: Implement `random_circuit_ttn` for non-trivial -# bond dimensions and correlations. -# TODO: Implement random_ttn for QN-Index -function random_ttn(args...; kwargs...) - T = ttn(args...; kwargs...) - randn!.(vertex_data(T)) - normalize!.(vertex_data(T)) - return T +function set_ortho_region(tn::TTN, ortho_region) + return ttn(ITensorNetwork(tn); ortho_region) end -function random_mps( - external_inds::Vector{<:Index}; - states=nothing, - internal_inds_space=trivial_space(external_inds), -) - # TODO: Implement in terms of general - # `random_ttn` constructor - tn_mps = if isnothing(states) - randomMPS(external_inds; linkdims=internal_inds_space) - else - randomMPS(external_inds, states; linkdims=internal_inds_space) +function ttn(args...; ortho_region=nothing) + tn = ITensorNetwork(args...) 
+ if isnothing(ortho_region) + ortho_region = vertices(tn) end - return ttn([tn_mps[v] for v in eachindex(tn_mps)]) + return _TreeTensorNetwork(tn, ortho_region) end -# -# Construction from operator (map) -# - -function ttn( - ::Type{ElT}, - sites_map::Pair{<:AbstractIndsNetwork,<:AbstractIndsNetwork}, - ops::Dictionary; - kwargs..., -) where {ElT<:Number} - s = first(sites_map) # TODO: Use the sites_map - N = nv(sites) - os = Prod{Op}() - for v in vertices(sites) - os *= Op(ops[v], v) +function mps(args...; ortho_region=nothing) + # TODO: Check it is a path graph. + tn = ITensorNetwork(args...) + if isnothing(ortho_region) + ortho_region = vertices(tn) end - T = ttn(ElT, os, sites; kwargs...) - # see https://github.com/ITensor/ITensors.jl/issues/526 - lognormT = lognorm(T) - T /= exp(lognormT / N) # TODO: fix broadcasting for in-place assignment - truncate!(T; cutoff=1e-15) - T *= exp(lognormT / N) - return T + return _TreeTensorNetwork(tn, ortho_region) end -function ttn( - ::Type{ElT}, - sites_map::Pair{<:AbstractIndsNetwork,<:AbstractIndsNetwork}, - fops::Function; - kwargs..., -) where {ElT<:Number} - sites = first(sites_map) # TODO: Use the sites_map - ops = Dictionary(vertices(sites), map(v -> fops(v), vertices(sites))) - return ttn(ElT, sites, ops; kwargs...) +function mps(f, is::Vector{<:Index}; kwargs...) + return mps(f, path_indsnetwork(is); kwargs...) end -function ttn( - ::Type{ElT}, - sites_map::Pair{<:AbstractIndsNetwork,<:AbstractIndsNetwork}, - op::String; - kwargs..., -) where {ElT<:Number} - sites = first(sites_map) # TODO: Use the sites_map - ops = Dictionary(vertices(sites), fill(op, nv(sites))) - return ttn(ElT, sites, ops; kwargs...) -end - -# construct from dense ITensor, using IndsNetwork of site indices -function ttn(A::ITensor, is::IndsNetwork; ortho_center=default_root_vertex(is), kwargs...) +# Construct from dense ITensor, using IndsNetwork of site indices. +function ttn(a::ITensor, is::IndsNetwork; ortho_region=[default_root_vertex(is)], kwargs...) for v in vertices(is) - @assert hasinds(A, is[v]) + @assert hasinds(a, is[v]) end - @assert ortho_center ∈ vertices(is) - ψ = ITensorNetwork(is) - Ã = A - for e in post_order_dfs_edges(ψ, ortho_center) + @assert ortho_region ⊆ vertices(is) + tn = ITensorNetwork(is) + ortho_center = first(ortho_region) + for e in post_order_dfs_edges(tn, ortho_center) left_inds = uniqueinds(is, e) - L, R = factorize(Ã, left_inds; tags=edge_tag(e), ortho="left", kwargs...) - l = commonind(L, R) - ψ[src(e)] = L - is[e] = [l] - Ã = R + a_l, a_r = factorize(a, left_inds; tags=edge_tag(e), ortho="left", kwargs...) + tn[src(e)] = a_l + is[e] = commoninds(a_l, a_r) + a = a_r end - ψ[ortho_center] = Ã - T = ttn(ψ) - T = orthogonalize(T, ortho_center) - return T -end - -# Special constructors - -function mps(external_inds::Vector{<:Index}; states) - return mps(map(i -> [i], external_inds); states) + tn[ortho_center] = a + ttn_a = ttn(tn) + return orthogonalize(ttn_a, ortho_center) end -function mps(external_inds::Vector{<:Vector{<:Index}}; states) - g = named_path_graph(length(external_inds)) - tn = ITensorNetwork(g) - for v in vertices(tn) - tn[v] = state(only(external_inds[v]), states(v)) - end - tn = insert_missing_internal_inds( - tn, edges(g); internal_inds_space=trivial_space(indtype(external_inds)) - ) - return ttn(tn) +function random_ttn(args...; kwargs...) + # TODO: Check it is a tree graph. 
+ return normalize(_TreeTensorNetwork(random_tensornetwork(args...; kwargs...))) end -# -# Utility -# - -function ITensorMPS.replacebond!(T::TTN, edge::AbstractEdge, phi::ITensor; kwargs...) - ortho::String = get(kwargs, :ortho, "left") - swapsites::Bool = get(kwargs, :swapsites, false) - which_decomp::Union{String,Nothing} = get(kwargs, :which_decomp, nothing) - normalize::Bool = get(kwargs, :normalize, false) - - indsTe = inds(T[src(edge)]) - if swapsites - sb = siteinds(M, src(edge)) - sbp1 = siteinds(M, dst(edge)) - indsTe = replaceinds(indsTe, sb, sbp1) - end - - L, R, spec = factorize( - phi, indsTe; which_decomp=which_decomp, tags=tags(T, edge), kwargs... - ) - - T[src(edge)] = L - T[dst(edge)] = R - if ortho == "left" - normalize && (T[dst(edge)] ./= norm(T[dst(edge)])) - isortho(T) && (T = set_ortho_center(T, [dst(edge)])) - elseif ortho == "right" - normalize && (T[src(edge)] ./= norm(T[src(edge)])) - isortho(T) && (T = set_ortho_center(T, [src(edge)])) - end - return spec +function random_mps(args...; kwargs...) + # TODO: Check it is a path graph. + return random_ttn(args...; kwargs...) end -function ITensorMPS.replacebond!(T::TTN, edge::Pair, phi::ITensor; kwargs...) - return replacebond!(T, edgetype(T)(edge), phi; kwargs...) +function random_mps(f, is::Vector{<:Index}; kwargs...) + return random_mps(f, path_indsnetwork(is); kwargs...) end -function ITensorMPS.replacebond(T0::TTN, args...; kwargs...) - return replacebond!(copy(T0), args...; kwargs...) +function random_mps(s::Vector{<:Index}; kwargs...) + return random_mps(path_indsnetwork(s); kwargs...) end diff --git a/test/test_additensornetworks.jl b/test/test_additensornetworks.jl index fdc497d6..e2ab2d89 100644 --- a/test/test_additensornetworks.jl +++ b/test/test_additensornetworks.jl @@ -10,8 +10,8 @@ using Test: @test, @testset Random.seed!(5623) g = named_grid((2, 3)) s = siteinds("S=1/2", g) - ψ1 = ITensorNetwork(s, v -> "↑") - ψ2 = ITensorNetwork(s, v -> "↓") + ψ1 = ITensorNetwork(v -> "↑", s) + ψ2 = ITensorNetwork(v -> "↓", s) ψ_GHZ = ψ1 + ψ2 diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl index c9d59c74..62f53d07 100644 --- a/test/test_itensornetwork.jl +++ b/test/test_itensornetwork.jl @@ -1,8 +1,8 @@ @eval module $(gensym()) -using DataGraphs: vertex_data using Dictionaries: Dictionary using Distributions: Uniform using Graphs: + degree, dijkstra_shortest_paths, edges, grid, @@ -18,6 +18,7 @@ using ITensors: ITensors, Index, ITensor, + Op, commonind, commoninds, contract, @@ -26,10 +27,13 @@ using ITensors: hasinds, inds, itensor, + onehot, order, + randomITensor, + scalartype, sim, uniqueinds -using ITensors.NDTensors: dims +using ITensors.NDTensors: dim using ITensorNetworks: ITensorNetworks, ⊗, @@ -42,12 +46,15 @@ using ITensorNetworks: linkinds, orthogonalize, random_tensornetwork, - siteinds + siteinds, + ttn using LinearAlgebra: factorize using NamedGraphs: NamedEdge, incident_edges, named_comb_tree, named_grid using Random: Random, randn! using Test: @test, @test_broken, @testset +const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) + @testset "ITensorNetwork tests" begin @testset "ITensorNetwork Basics" begin Random.seed!(1234) @@ -87,7 +94,9 @@ using Test: @test, @test_broken, @testset # TODO: Support this syntax, maybe rename `subgraph`. 
@test_broken induced_subgraph(tn, [(1,), (2,)]) isa ITensorNetwork - randn!.(vertex_data(tn)) + for v in vertices(tn) + tn[v] = randn!(tn[v]) + end tn′ = sim(dag(tn); sites=[]) @test tn′ isa ITensorNetwork @@ -126,7 +135,7 @@ using Test: @test, @test_broken, @testset dims = (2, 2) g = named_grid(dims) s = siteinds("S=1/2", g) - ψ = ITensorNetwork(s, v -> "↑") + ψ = ITensorNetwork(v -> "↑", s) tn = inner_network(ψ, ψ) tn_2 = contract(tn, ((1, 2), 2) => ((1, 2), 1)) @test !has_vertex(tn_2, ((1, 2), 2)) @@ -137,7 +146,7 @@ using Test: @test, @test_broken, @testset dims = (2, 2) g = named_grid(dims) s = siteinds("S=1/2", g) - ψ = ITensorNetwork(s, v -> "↑") + ψ = ITensorNetwork(v -> "↑", s) rem_vertex!(ψ, (1, 2)) tn = inner_network(ψ, ψ) @test !has_vertex(tn, ((1, 2), 1)) @@ -150,8 +159,8 @@ using Test: @test, @test_broken, @testset @test has_vertex(tn, ((2, 2), 2)) end - @testset "Custom element type" for elt in (Float32, Float64, ComplexF32, ComplexF64), - link_space in (nothing, 3), + @testset "Custom element type (eltype=$elt)" for elt in elts, + kwargs in ((;), (; link_space=3)), g in ( grid((4,)), named_grid((3, 3)), @@ -159,24 +168,91 @@ using Test: @test, @test_broken, @testset siteinds("S=1/2", named_grid((3, 3))), ) - ψ = ITensorNetwork(g; link_space) do v, inds... - return itensor(randn(elt, dims(inds)...), inds...) + ψ = ITensorNetwork(g; kwargs...) do v + return inds -> itensor(randn(elt, dim.(inds)...), inds) end @test eltype(ψ[first(vertices(ψ))]) == elt - ψ = ITensorNetwork(g; link_space) do v, inds... - return itensor(randn(dims(inds)...), inds...) + ψ = ITensorNetwork(g; kwargs...) do v + return inds -> itensor(randn(dim.(inds)...), inds) end @test eltype(ψ[first(vertices(ψ))]) == Float64 - ψ = random_tensornetwork(elt, g; link_space) + ψ = random_tensornetwork(elt, g; kwargs...) @test eltype(ψ[first(vertices(ψ))]) == elt - ψ = random_tensornetwork(g; link_space) + ψ = random_tensornetwork(g; kwargs...) @test eltype(ψ[first(vertices(ψ))]) == Float64 - ψ = ITensorNetwork(elt, undef, g; link_space) + ψ = ITensorNetwork(elt, undef, g; kwargs...) 
@test eltype(ψ[first(vertices(ψ))]) == elt ψ = ITensorNetwork(undef, g) @test eltype(ψ[first(vertices(ψ))]) == Float64 end + @testset "Product state constructors" for elt in elts + dims = (2, 2) + g = named_comb_tree(dims) + s = siteinds("S=1/2", g) + state1 = ["↑" "↓"; "↓" "↑"] + state2 = reshape([[1, 0], [0, 1], [0, 1], [1, 0]], 2, 2) + each_args = (; + ferro=( + ("↑",), + (elt, "↑"), + (Returns(i -> ITensor([1, 0], i)),), + (elt, Returns(i -> ITensor([1, 0], i))), + (Returns([1, 0]),), + (elt, Returns([1, 0])), + ), + antiferro=( + (state1,), + (elt, state1), + (Dict(CartesianIndices(dims) .=> state1),), + (elt, Dict(CartesianIndices(dims) .=> state1)), + (Dict(Tuple.(CartesianIndices(dims)) .=> state1),), + (elt, Dict(Tuple.(CartesianIndices(dims)) .=> state1)), + (Dictionary(CartesianIndices(dims), state1),), + (elt, Dictionary(CartesianIndices(dims), state1)), + (Dictionary(Tuple.(CartesianIndices(dims)), state1),), + (elt, Dictionary(Tuple.(CartesianIndices(dims)), state1)), + (state2,), + (elt, state2), + (Dict(CartesianIndices(dims) .=> state2),), + (elt, Dict(CartesianIndices(dims) .=> state2)), + (Dict(Tuple.(CartesianIndices(dims)) .=> state2),), + (elt, Dict(Tuple.(CartesianIndices(dims)) .=> state2)), + (Dictionary(CartesianIndices(dims), state2),), + (elt, Dictionary(CartesianIndices(dims), state2)), + (Dictionary(Tuple.(CartesianIndices(dims)), state2),), + (elt, Dictionary(Tuple.(CartesianIndices(dims)), state2)), + ), + ) + for pattern in keys(each_args) + for args in each_args[pattern] + x = ITensorNetwork(args..., s) + if first(args) === elt + @test scalartype(x) === elt + else + @test scalartype(x) === Float64 + end + for v in vertices(x) + xᵛ = x[v] + @test degree(x, v) + 1 == ndims(xᵛ) + sᵛ = only(siteinds(x, v)) + for w in neighbors(x, v) + lʷ = only(linkinds(x, v => w)) + @test dim(lʷ) == 1 + xᵛ *= onehot(lʷ => 1) + end + @test ndims(xᵛ) == 1 + a = if pattern == :ferro + [1, 0] + elseif pattern == :antiferro + iseven(sum(v)) ? [1, 0] : [0, 1] + end + @test xᵛ == ITensor(a, sᵛ) + end + end + end + end + @testset "random_tensornetwork with custom distributions" begin distribution = Uniform(-1.0, 1.0) tn = random_tensornetwork(distribution, named_grid(4); link_space=2) @@ -283,7 +359,7 @@ using Test: @test, @test_broken, @testset s = siteinds("S=1/2", g) state_map(v::Tuple) = iseven(sum(isodd.(v))) ? "↑" : "↓" - ψ = ITensorNetwork(s, state_map) + ψ = ITensorNetwork(state_map, s) t = ψ[2, 2] si = only(siteinds(ψ, (2, 2))) bi = map(e -> only(linkinds(ψ, e)), incident_edges(ψ, (2, 2))) @@ -291,7 +367,7 @@ using Test: @test, @test_broken, @testset @test abs(t[si => "↑", [b => end for b in bi]...]) == 1.0 # insert_links introduces extra signs through factorization... @test t[si => "↓", [b => end for b in bi]...] 
== 0.0 - ϕ = ITensorNetwork(elt, s, state_map) + ϕ = ITensorNetwork(elt, state_map, s) t = ϕ[2, 2] si = only(siteinds(ϕ, (2, 2))) bi = map(e -> only(linkinds(ϕ, e)), incident_edges(ϕ, (2, 2))) diff --git a/test/test_tebd.jl b/test/test_tebd.jl index 8c5926ea..99aa9bbc 100644 --- a/test/test_tebd.jl +++ b/test/test_tebd.jl @@ -42,7 +42,7 @@ ITensors.disable_warn_order() # Sequence for contracting expectation values inner_sequence = reduce((x, y) -> [x, y], vec(Tuple.(CartesianIndices(dims)))) - ψ_init = ITensorNetwork(s, v -> "↑") + ψ_init = ITensorNetwork(v -> "↑", s) E0 = expect(ℋ, ψ_init; sequence=inner_sequence) ψ = tebd( group_terms(ℋ, g), diff --git a/test/test_tno.jl b/test/test_tno.jl index 4a3f5018..1512768c 100644 --- a/test/test_tno.jl +++ b/test/test_tno.jl @@ -35,10 +35,12 @@ using Test: @test, @testset ψ = random_tensornetwork(s; link_space=2) ψ_gated = copy(ψ) + for gate in gates ψ_gated = apply(gate, ψ_gated) end ψ_tnod = copy(ψ) + for tno in tnos ψ_tnod = flatten_networks(ψ_tnod, tno) for v in vertices(ψ_tnod) @@ -54,6 +56,7 @@ using Test: @test, @testset z1 = contract_inner(ψ_gated, ψ_gated) z2 = contract_inner(ψ_tnod, ψ_tnod) z3 = contract_inner(ψ_tno, ψ_tno) + f12 = contract_inner(ψ_tnod, ψ_gated) / sqrt(z1 * z2) f13 = contract_inner(ψ_tno, ψ_gated) / sqrt(z1 * z3) f23 = contract_inner(ψ_tno, ψ_tnod) / sqrt(z2 * z3) diff --git a/test/test_treetensornetworks/test_expect.jl b/test/test_treetensornetworks/test_expect.jl index 3acbd83b..d8eb365c 100644 --- a/test/test_treetensornetworks/test_expect.jl +++ b/test/test_treetensornetworks/test_expect.jl @@ -2,13 +2,15 @@ using Graphs: vertices using ITensors.ITensorMPS: MPS using ITensorNetworks: ttn, expect, random_mps, siteinds +using LinearAlgebra: norm using NamedGraphs: named_comb_tree using Test: @test, @testset @testset "MPS expect comparison with ITensors" begin - N = 25 + N = 4 s = siteinds("S=1/2", N) - a = random_mps(s; internal_inds_space=100) + a = random_mps(s; link_space=100) + @test norm(a) ≈ 1 b = MPS([a[v] for v in vertices(a)]) res_a = expect("Sz", a) res_b = expect(b, "Sz") @@ -17,7 +19,7 @@ using Test: @test, @testset end @testset "TTN expect" begin - tooth_lengths = fill(5, 6) + tooth_lengths = fill(2, 2) c = named_comb_tree(tooth_lengths) s = siteinds("S=1/2", c) d = Dict() @@ -27,7 +29,7 @@ end magnetization[v] = isodd(i) ? 0.5 : -0.5 end states = v -> d[v] - state = ttn(s, states) + state = ttn(states, s) res = expect("Sz", state) @test all([isapprox(res[v], magnetization[v]; atol=1e-8) for v in vertices(s)]) end diff --git a/test/test_treetensornetworks/test_position.jl b/test/test_treetensornetworks/test_position.jl index 90ec7f30..92988ce7 100644 --- a/test/test_treetensornetworks/test_position.jl +++ b/test/test_treetensornetworks/test_position.jl @@ -31,7 +31,7 @@ using Test d[v] = isodd(i) ? 
"Up" : "Dn" end states = v -> d[v] - psi = ttn(s, states) + psi = ttn(states, s) # actual test, verifies that position is out of place vs = vertices(s) diff --git a/test/test_treetensornetworks/test_solvers/test_contract.jl b/test/test_treetensornetworks/test_solvers/test_contract.jl index ac1376da..f21a558e 100644 --- a/test/test_treetensornetworks/test_solvers/test_contract.jl +++ b/test/test_treetensornetworks/test_solvers/test_contract.jl @@ -25,8 +25,7 @@ using Test: @test, @test_broken, @testset @testset "Contract MPO" begin N = 20 s = siteinds("S=1/2", N) - psi = random_mps(s; internal_inds_space=8) - + psi = random_mps(s; link_space=8) os = OpSum() for j in 1:(N - 1) os += 0.5, "S+", j, "S-", j + 1 @@ -42,15 +41,15 @@ using Test: @test, @test_broken, @testset # Test basic usage with default parameters Hpsi = apply(H, psi; alg="fit", init=psi, nsweeps=1) - @test inner(psi, Hpsi) ≈ inner(psi', H, psi) atol = 1e-5 + @test inner(psi, Hpsi) ≈ inner(psi', H, psi) rtol = 1e-5 # Test variational compression via DMRG Hfit = ProjOuterProdTTN(psi', H) Hpsi_via_dmrg = dmrg(Hfit, psi; updater_kwargs=(; which_eigval=:LR,), nsweeps=1) - @test abs(inner(Hpsi_via_dmrg, Hpsi / norm(Hpsi))) ≈ 1 atol = 1e-4 + @test abs(inner(Hpsi_via_dmrg, Hpsi / norm(Hpsi))) ≈ 1 rtol = 1e-4 # Test whether the interface works for ProjTTNSum with factors Hfit = ProjTTNSum([ProjOuterProdTTN(psi', H), ProjOuterProdTTN(psi', H)], [-0.2, -0.8]) Hpsi_via_dmrg = dmrg(Hfit, psi; nsweeps=1, updater_kwargs=(; which_eigval=:SR,)) - @test abs(inner(Hpsi_via_dmrg, Hpsi / norm(Hpsi))) ≈ 1 atol = 1e-4 + @test abs(inner(Hpsi_via_dmrg, Hpsi / norm(Hpsi))) ≈ 1 rtol = 1e-4 # Test basic usage for use with multiple ProjOuterProdTTN with default parameters # BLAS.axpy-like test @@ -63,14 +62,14 @@ using Test: @test, @test_broken, @testset Hpsi = ITensorNetworks.sum_apply( [(H, psi), (minus_identity, psi)]; alg="fit", init=psi, nsweeps=3 ) - @test inner(psi, Hpsi) ≈ (inner(psi', H, psi) - norm(psi)^2) atol = 1e-5 + @test inner(psi, Hpsi) ≈ (inner(psi', H, psi) - norm(psi)^2) rtol = 1e-5 # Test the above via DMRG # ToDo: Investigate why this is broken Hfit = ProjTTNSum([ProjOuterProdTTN(psi', H), ProjOuterProdTTN(psi', identity)], [-1, 1]) Hpsi_normalized = ITensorNetworks.dmrg( Hfit, psi; nsweeps=3, updater_kwargs=(; which_eigval=:SR) ) - @test_broken abs(inner(Hpsi, (Hpsi_normalized) / norm(Hpsi))) ≈ 1 atol = 1e-5 + @test_broken abs(inner(Hpsi, (Hpsi_normalized) / norm(Hpsi))) ≈ 1 rtol = 1e-5 # # Change "top" indices of MPO to be a different set @@ -84,16 +83,16 @@ using Test: @test, @test_broken, @testset end # Test with nsweeps=3 Hpsi = contract(H, psi; alg="fit", init=psit, nsweeps=3) - @test inner(psit, Hpsi) ≈ inner(psit, H, psi) atol = 1e-5 + @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-5 # Test with less good initial guess MPS not equal to psi psi_guess = truncate(psit; maxdim=2) Hpsi = contract(H, psi; alg="fit", nsweeps=4, init=psi_guess) - @test inner(psit, Hpsi) ≈ inner(psit, H, psi) atol = 1e-5 + @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-5 # Test with nsite=1 - Hpsi_guess = random_mps(t; internal_inds_space=32) + Hpsi_guess = random_mps(t; link_space=32) Hpsi = contract(H, psi; alg="fit", init=Hpsi_guess, nsites=1, nsweeps=4) - @test inner(psit, Hpsi) ≈ inner(psit, H, psi) atol = 1e-4 + @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-4 end @testset "Contract TTN" begin @@ -109,12 +108,12 @@ end # Test basic usage with default parameters Hpsi = apply(H, psi; alg="fit", init=psi, nsweeps=1, 
cutoff=eps()) - @test inner(psi, Hpsi) ≈ inner(psi', H, psi) atol = 1e-5 + @test inner(psi, Hpsi) ≈ inner(psi', H, psi) rtol = 1e-5 # Test usage with non-default parameters Hpsi = apply( H, psi; alg="fit", init=psi, nsweeps=5, maxdim=[16, 32], cutoff=[1e-4, 1e-8, 1e-12] ) - @test inner(psi, Hpsi) ≈ inner(psi', H, psi) atol = 1e-2 + @test inner(psi, Hpsi) ≈ inner(psi', H, psi) rtol = 1e-2 # Test basic usage for multiple ProjOuterProdTTN with default parameters # BLAS.axpy-like test @@ -124,7 +123,7 @@ end Hpsi = ITensorNetworks.sum_apply( [(H, psi), (minus_identity, psi)]; alg="fit", init=psi, nsweeps=1 ) - @test inner(psi, Hpsi) ≈ (inner(psi', H, psi) - norm(psi)^2) atol = 1e-5 + @test inner(psi, Hpsi) ≈ (inner(psi', H, psi) - norm(psi)^2) rtol = 1e-5 # # Change "top" indices of TTN to be a different set @@ -136,17 +135,17 @@ end # Test with nsweeps=2 Hpsi = contract(H, psi; alg="fit", init=psit, nsweeps=2) - @test inner(psit, Hpsi) ≈ inner(psit, H, psi) atol = 1e-5 + @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-5 # Test with less good initial guess MPS not equal to psi Hpsi_guess = truncate(psit; maxdim=2) Hpsi = contract(H, psi; alg="fit", nsweeps=4, init=Hpsi_guess) - @test inner(psit, Hpsi) ≈ inner(psit, H, psi) atol = 1e-5 + @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-5 # Test with nsite=1 Hpsi_guess = random_ttn(t; link_space=32) Hpsi = contract(H, psi; alg="fit", nsites=1, nsweeps=10, init=Hpsi_guess) - @test inner(psit, Hpsi) ≈ inner(psit, H, psi) atol = 1e-2 + @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-2 end @testset "Contract TTN with dangling inds" begin diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg.jl b/test/test_treetensornetworks/test_solvers/test_dmrg.jl index 0fc8e8a4..3680a5d5 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg.jl @@ -43,7 +43,7 @@ ITensors.disable_auto_fermion() H = mpo(os, s) - psi = random_mps(s; internal_inds_space=20) + psi = random_mps(s; link_space=20) nsweeps = 10 maxdim = [10, 20, 40, 100] @@ -85,7 +85,7 @@ end os += "Sz", j, "Sz", j + 1 end H = mpo(os, s) - psi = random_mps(s; internal_inds_space=20) + psi = random_mps(s; link_space=20) nsweeps = 4 maxdim = [20, 40, 80, 80] @@ -121,7 +121,7 @@ end os += "Sz", j, "Sz", j + 1 end H = mpo(os, s) - psi = random_mps(s; internal_inds_space=10) + psi = random_mps(s; link_space=10) nsweeps = 4 maxdim = [10, 20, 40, 80] @@ -153,7 +153,7 @@ end H = mpo(os, s) - psi = random_mps(s; internal_inds_space=20) + psi = random_mps(s; link_space=20) # Choose nsweeps to be less than length of arrays nsweeps = 5 @@ -188,7 +188,7 @@ end d[v] = isodd(i) ? "Up" : "Dn" end states = v -> d[v] - psi = ttn(s, states) + psi = ttn(states, s) # psi = random_ttn(s; link_space=20) #FIXME: random_ttn broken for QN conserving case @@ -251,7 +251,7 @@ end d[v] = isodd(i) ? 
"Up" : "Dn" end states = v -> d[v] - psi = ttn(s, states) + psi = ttn(states, s) psi = dmrg( H, psi; nsweeps, maxdim, cutoff, nsites, updater_kwargs=(; krylovdim=3, maxiter=1) ) diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl index e821649f..74fb2b2b 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl @@ -21,7 +21,7 @@ using Test: @test, @testset h = W * (2 * rand(n) .- 1) H = mpo(ModelHamiltonians.heisenberg(n; h), s) - ψ = mps(s; states=(v -> rand(["↑", "↓"]))) + ψ = mps(v -> rand(["↑", "↓"]), s) dmrg_x_kwargs = (nsweeps=20, normalize=true, maxdim=20, cutoff=1e-10, outputlevel=0) @@ -52,10 +52,7 @@ end h = Dictionary(vertices(c), W * (2 * rand(nv(c)) .- 1)) H = ttn(ModelHamiltonians.heisenberg(c; h), s) - - # TODO: Use `ttn(s; states=v -> rand(["↑", "↓"]))` or - # `ttns(s; states=v -> rand(["↑", "↓"]))` - ψ = normalize(ttn(s, v -> rand(["↑", "↓"]))) + ψ = normalize(ttn(v -> rand(["↑", "↓"]), s)) dmrg_x_kwargs = (nsweeps=20, normalize=true, maxdim=20, cutoff=1e-10, outputlevel=0) diff --git a/test/test_treetensornetworks/test_solvers/test_linsolve.jl b/test/test_treetensornetworks/test_solvers/test_linsolve.jl index cb2af561..3c62bfc0 100644 --- a/test/test_treetensornetworks/test_solvers/test_linsolve.jl +++ b/test/test_treetensornetworks/test_solvers/test_linsolve.jl @@ -22,29 +22,22 @@ using Test: @test, @test_broken, @testset end H = mpo(os, s) - states = [isodd(n) ? "Up" : "Dn" for n in 1:N] - - ## Correct x is x_c - #x_c = random_mps(s, state; linkdims=4) - ## Compute b - #b = apply(H, x_c; cutoff) - - #x0 = random_mps(s, state; linkdims=10) - #x = linsolve(H, b, x0; cutoff, maxdim, nsweeps, ishermitian=true, solver_tol=1E-6) - - #@show norm(x - x_c) - #@test norm(x - x_c) < 1E-4 - # # Test complex case # Random.seed!(1234) - x_c = - random_mps(s; states, internal_inds_space=4) + - 0.1im * random_mps(s; states, internal_inds_space=2) + + ## TODO: Need to add support for `random_mps`/`random_tensornetwork` with state input. + ## states = [isodd(n) ? "Up" : "Dn" for n in 1:N] + ## x_c = random_mps(states, s; link_space=4) + 0.1im * random_mps(states, s; link_space=2) + x_c = random_mps(s; link_space=4) + 0.1im * random_mps(s; link_space=2) + b = apply(H, x_c; alg="fit", nsweeps=3, init=x_c) #cutoff is unsupported kwarg for apply/contract - x0 = random_mps(s; states, internal_inds_space=10) + ## TODO: Need to add support for `random_mps`/`random_tensornetwork` with state input. 
+ ## x0 = random_mps(states, s; link_space=10) + x0 = random_mps(s; link_space=10) + x = @test_broken linsolve( H, b, x0; cutoff, maxdim, nsweeps, updater_kwargs=(; tol=1E-6, ishermitian=true) ) diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp.jl b/test/test_treetensornetworks/test_solvers/test_tdvp.jl index 569662b3..126e70e0 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp.jl @@ -35,7 +35,7 @@ using Test: @testset, @test H = mpo(os, s) - ψ0 = random_mps(s; internal_inds_space=10) + ψ0 = random_mps(s; link_space=10) # Time evolve forward: ψ1 = tdvp(H, -0.1im, ψ0; nsweeps=1, cutoff, nsites=1) @@ -96,7 +96,7 @@ using Test: @testset, @test H2 = mpo(os2, s) Hs = [H1, H2] - ψ0 = random_mps(s; internal_inds_space=10) + ψ0 = random_mps(s; link_space=10) ψ1 = tdvp(Hs, -0.1im, ψ0; nsweeps=1, cutoff, nsites=1) @@ -133,7 +133,7 @@ using Test: @testset, @test H = mpo(os, s) - ψ0 = random_mps(s; internal_inds_space=10) + ψ0 = random_mps(s; link_space=10) # Time evolve forward: ψ1 = tdvp(H, -0.1im, ψ0; time_step=-0.05im, order, cutoff, nsites=1) @@ -171,7 +171,7 @@ using Test: @testset, @test Ut = exp(-im * tau * HM) - state = mps(s; states=(n -> isodd(n) ? "Up" : "Dn")) + state = mps(n -> isodd(n) ? "Up" : "Dn", s) psi2 = deepcopy(state) psix = contract(state) @@ -250,7 +250,7 @@ using Test: @testset, @test end append!(gates, reverse(gates)) - state = mps(s; states=(n -> isodd(n) ? "Up" : "Dn")) + state = mps(n -> isodd(n) ? "Up" : "Dn", s) phi = deepcopy(state) c = div(N, 2) @@ -289,7 +289,7 @@ using Test: @testset, @test # Evolve using TDVP # - phi = mps(s; states=(n -> isodd(n) ? "Up" : "Dn")) + phi = mps(n -> isodd(n) ? "Up" : "Dn", s) obs = observer( "Sz" => (; state) -> expect("Sz", state; vertices=[c])[c], @@ -329,7 +329,7 @@ using Test: @testset, @test H = mpo(os, s) - state = random_mps(s; internal_inds_space=2) + state = random_mps(s; link_space=2) en0 = inner(state', H, state) nsites = [repeat([2], 10); repeat([1], 10)] maxdim = 32 @@ -382,7 +382,7 @@ using Test: @testset, @test "Sz" => step_measure_sz, "En" => step_measure_en, "info" => get_info ) - state2 = mps(s; states=(n -> isodd(n) ? "Up" : "Dn")) + state2 = mps(n -> isodd(n) ? "Up" : "Dn", s) tdvp( H, -im * ttotal, @@ -502,7 +502,7 @@ end Ut = exp(-im * tau * HM) - state = ttn(ComplexF64, s, v -> iseven(sum(isodd.(v))) ? "Up" : "Dn") + state = ttn(ComplexF64, v -> iseven(sum(isodd.(v))) ? "Up" : "Dn", s) statex = contract(state) Sz_tdvp = Float64[] @@ -560,7 +560,7 @@ end end append!(gates, reverse(gates)) - state = ttn(s, v -> iseven(sum(isodd.(v))) ? "Up" : "Dn") + state = ttn(v -> iseven(sum(isodd.(v))) ? "Up" : "Dn", s) phi = copy(state) c = (2, 1) @@ -599,7 +599,7 @@ end # Evolve using TDVP # - phi = ttn(s, v -> iseven(sum(isodd.(v))) ? "Up" : "Dn") + phi = ttn(v -> iseven(sum(isodd.(v))) ? "Up" : "Dn", s) obs = observer( "Sz" => (; state) -> expect("Sz", state; vertices=[c])[c], "En" => (; state) -> real(inner(state', H, state)), diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl b/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl index 58d4e6b7..343d4e0d 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl @@ -132,7 +132,7 @@ end ℋ⃗₀ = [ℋ₁₀, ℋ₂₀] H⃗₀ = [mpo(ℋ₀, s) for ℋ₀ in ℋ⃗₀] - ψ₀ = complex(mps(s; states=(j -> isodd(j) ? "↑" : "↓"))) + ψ₀ = complex(mps(j -> isodd(j) ? 
"↑" : "↓", s)) ψₜ_ode = tdvp( H⃗₀, @@ -194,7 +194,7 @@ end ℋ⃗₀ = [ℋ₁₀, ℋ₂₀] H⃗₀ = [ttn(ℋ₀, s) for ℋ₀ in ℋ⃗₀] - ψ₀ = ttn(ComplexF64, s, v -> iseven(sum(isodd.(v))) ? "↑" : "↓") + ψ₀ = ttn(ComplexF64, v -> iseven(sum(isodd.(v))) ? "↑" : "↓", s) ψₜ_ode = tdvp( H⃗₀, diff --git a/test/test_ttno.jl b/test/test_ttno.jl index b83a192f..79c25175 100644 --- a/test/test_ttno.jl +++ b/test/test_ttno.jl @@ -1,6 +1,6 @@ @eval module $(gensym()) using Graphs: vertices -using ITensorNetworks: ttn, contract, ortho_center, siteinds, union_all_inds +using ITensorNetworks: ttn, contract, ortho_region, siteinds, union_all_inds using ITensors: @disable_warn_order, prime, randomITensor using LinearAlgebra: norm using NamedGraphs: named_comb_tree @@ -27,7 +27,7 @@ using Test: @test, @testset O = randomITensor(sites_o...) # dense TTN constructor from IndsNetwork @disable_warn_order o1 = ttn(O, is_isp; cutoff) - root_vertex = only(ortho_center(o1)) + root_vertex = only(ortho_region(o1)) @disable_warn_order begin O1 = contract(o1, root_vertex) end diff --git a/test/test_ttns.jl b/test/test_ttns.jl index 24f5eeae..c9ae7344 100644 --- a/test/test_ttns.jl +++ b/test/test_ttns.jl @@ -1,7 +1,7 @@ @eval module $(gensym()) using DataGraphs: vertex_data using Graphs: vertices -using ITensorNetworks: ttn, contract, ortho_center, siteinds +using ITensorNetworks: ttn, contract, ortho_region, siteinds using ITensors: @disable_warn_order, randomITensor using LinearAlgebra: norm using NamedGraphs: named_comb_tree @@ -25,7 +25,7 @@ using Test: @test, @testset S = randomITensor(vertex_data(is)...) # dense TTN constructor from IndsNetwork @disable_warn_order s1 = ttn(S, is; cutoff) - root_vertex = only(ortho_center(s1)) + root_vertex = only(ortho_region(s1)) @disable_warn_order begin S1 = contract(s1, root_vertex) end