From 43ff72e0e7dbfe8ec2453641d3082de5ae1f3065 Mon Sep 17 00:00:00 2001 From: Joseph Tindall <51231103+JoeyT1994@users.noreply.github.com> Date: Tue, 14 May 2024 19:48:18 -0400 Subject: [PATCH 01/11] Account for BP edge case where network evaluates to 0 (#178) --- src/caches/beliefpropagationcache.jl | 5 ++++- src/contract.jl | 1 + test/test_belief_propagation.jl | 11 +++++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/caches/beliefpropagationcache.jl b/src/caches/beliefpropagationcache.jl index 2ce338f3..b980a52f 100644 --- a/src/caches/beliefpropagationcache.jl +++ b/src/caches/beliefpropagationcache.jl @@ -19,7 +19,10 @@ default_message_norm(m::ITensor) = norm(m) function default_message_update(contract_list::Vector{ITensor}; kwargs...) sequence = optimal_contraction_sequence(contract_list) updated_messages = contract(contract_list; sequence, kwargs...) - updated_messages /= norm(updated_messages) + message_norm = norm(updated_messages) + if !iszero(message_norm) + updated_messages /= message_norm + end return ITensor[updated_messages] end @traitfn default_bp_maxiter(g::::(!IsDirected)) = is_tree(g) ? 1 : nothing diff --git a/src/contract.jl b/src/contract.jl index 0fc575a6..4adb0e10 100644 --- a/src/contract.jl +++ b/src/contract.jl @@ -73,6 +73,7 @@ function logscalar( denominator_terms end + any(iszero, denominator_terms) && return -Inf return sum(log.(numerator_terms)) - sum(log.((denominator_terms))) end diff --git a/test/test_belief_propagation.jl b/test/test_belief_propagation.jl index 66cf25d7..ff8aa2e9 100644 --- a/test/test_belief_propagation.jl +++ b/test/test_belief_propagation.jl @@ -2,7 +2,6 @@ using Compat: Compat using Graphs: vertices # Trigger package extension. -using GraphsFlows: GraphsFlows using ITensorNetworks: ITensorNetworks, BeliefPropagationCache, @@ -18,6 +17,7 @@ using ITensorNetworks: message, partitioned_tensornetwork, random_tensornetwork, + scalar, siteinds, split_index, tensornetwork, @@ -28,7 +28,7 @@ using ITensors: ITensors, ITensor, combiner, dag, inds, inner, op, prime, random using ITensorNetworks.ModelNetworks: ModelNetworks using ITensors.NDTensors: array using LinearAlgebra: eigvals, tr -using NamedGraphs: NamedEdge +using NamedGraphs: NamedEdge, NamedGraph using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid using NamedGraphs.PartitionedGraphs: PartitionVertex, partitionedges using Random: Random @@ -75,5 +75,12 @@ using Test: @test, @testset @test all(eig -> imag(eig) ≈ 0, eigs) @test all(eig -> real(eig) >= -eps(eltype(eig)), eigs) + + #Test edge case of network which evalutes to 0 + χ = 2 + g = named_grid((3, 1)) + ψ = random_tensornetwork(ComplexF64, g; link_space=χ) + ψ[(1, 1)] = 0.0 * ψ[(1, 1)] + @test iszero(scalar(ψ; alg="bp")) end end From 29d031bc3be15b49857a561d551b745e74f956a1 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Tue, 14 May 2024 19:48:39 -0400 Subject: [PATCH 02/11] Bump to v0.11.8 [no ci] --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 3e2805a4..57d2753c 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.7" +version = "0.11.8" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" From e65f9f7fb26d33c38353dc40f710c8bd186f1fa5 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Fri, 17 May 2024 00:29:16 -0400 Subject: [PATCH 03/11] Use 
`random_itensor`, `random_mps`, etc. (#183) --- Project.toml | 6 +++--- .../ITensorNetworksOMEinsumContractionOrdersExt.jl | 2 +- src/lib/ModelNetworks/src/ModelNetworks.jl | 4 ++-- src/mpo_mps_compatibility.jl | 4 ++-- test/test_belief_propagation.jl | 4 ++-- test/test_binary_tree_partition.jl | 10 +++++----- test/test_contract_deltas.jl | 4 ++-- test/test_forms.jl | 4 ++-- test/test_gauging.jl | 4 ++-- test/test_itensornetwork.jl | 2 +- test/test_itensorsextensions.jl | 4 ++-- .../test_solvers/test_contract.jl | 6 +++--- test/test_treetensornetworks/test_solvers/test_dmrg.jl | 6 +++--- test/test_ttno.jl | 4 ++-- test/test_ttns.jl | 4 ++-- 15 files changed, 34 insertions(+), 34 deletions(-) diff --git a/Project.toml b/Project.toml index 57d2753c..1aa4351d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.8" +version = "0.11.9" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -58,8 +58,8 @@ DocStringExtensions = "0.9" EinExprs = "0.6.4" Graphs = "1.8" GraphsFlows = "0.1.1" -ITensorMPS = "0.1" -ITensors = "0.4, 0.5, 0.6" +ITensorMPS = "0.2.2" +ITensors = "0.6.8" IsApprox = "0.1" IterTools = "1.4.0" KrylovKit = "0.6, 0.7" diff --git a/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl b/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl index 6511327f..feee0d15 100644 --- a/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl +++ b/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl @@ -47,7 +47,7 @@ Returns a [`NestedEinsum`](@ref) instance. ```jldoctest julia> using ITensors, ITensorContractionOrders julia> i, j, k, l = Index(4), Index(5), Index(6), Index(7); -julia> x, y, z = randomITensor(i, j), randomITensor(j, k), randomITensor(k, l); +julia> x, y, z = random_itensor(i, j), random_itensor(j, k), random_itensor(k, l); julia> net = optimize_contraction([x, y, z]; optimizer=TreeSA()); ``` """ diff --git a/src/lib/ModelNetworks/src/ModelNetworks.jl b/src/lib/ModelNetworks/src/ModelNetworks.jl index 41fde3b1..88fee784 100644 --- a/src/lib/ModelNetworks/src/ModelNetworks.jl +++ b/src/lib/ModelNetworks/src/ModelNetworks.jl @@ -1,7 +1,7 @@ module ModelNetworks using Graphs: degree, dst, edges, src using ..ITensorNetworks: IndsNetwork, delta_network, insert_linkinds, itensor -using ITensors: commoninds, diagITensor, inds, noprime +using ITensors: commoninds, diag_itensor, inds, noprime using LinearAlgebra: Diagonal, eigen using NamedGraphs: NamedGraph @@ -21,7 +21,7 @@ function ising_network( tn = delta_network(eltype, s) if (szverts != nothing) for v in szverts - tn[v] = diagITensor(eltype[1, -1], inds(tn[v])) + tn[v] = diag_itensor(eltype[1, -1], inds(tn[v])) end end for edge in edges(tn) diff --git a/src/mpo_mps_compatibility.jl b/src/mpo_mps_compatibility.jl index a0c0b228..ac6d43f5 100644 --- a/src/mpo_mps_compatibility.jl +++ b/src/mpo_mps_compatibility.jl @@ -9,9 +9,9 @@ function ITensorMPS.MPO(opsum_sum::Sum{<:OpSum}, s::IndsNetwork) return ITensorMPS.MPO(sum(Ops.terms(opsum_sum)), s) end -function ITensorMPS.randomMPS(s::IndsNetwork, args...; kwargs...) +function ITensorMPS.random_mps(s::IndsNetwork, args...; kwargs...) s_linear = [only(s[v]) for v in 1:nv(s)] - return ITensorMPS.randomMPS(s_linear, args...; kwargs...) 
+ return ITensorMPS.random_mps(s_linear, args...; kwargs...) end function ITensorMPS.MPS(s::IndsNetwork, args...; kwargs...) diff --git a/test/test_belief_propagation.jl b/test/test_belief_propagation.jl index ff8aa2e9..f960bb8b 100644 --- a/test/test_belief_propagation.jl +++ b/test/test_belief_propagation.jl @@ -24,7 +24,7 @@ using ITensorNetworks: update, update_factor, update_message -using ITensors: ITensors, ITensor, combiner, dag, inds, inner, op, prime, randomITensor +using ITensors: ITensors, ITensor, combiner, dag, inds, inner, op, prime, random_itensor using ITensorNetworks.ModelNetworks: ModelNetworks using ITensors.NDTensors: array using LinearAlgebra: eigvals, tr @@ -55,7 +55,7 @@ using Test: @test, @testset #Test updating the underlying tensornetwork in the cache v = first(vertices(ψψ)) - new_tensor = randomITensor(inds(ψψ[v])) + new_tensor = random_itensor(inds(ψψ[v])) bpc_updated = update_factor(bpc, v, new_tensor) ψψ_updated = tensornetwork(bpc_updated) @test ψψ_updated[v] == new_tensor diff --git a/test/test_binary_tree_partition.jl b/test/test_binary_tree_partition.jl index 970d7f1b..7b2b8050 100644 --- a/test/test_binary_tree_partition.jl +++ b/test/test_binary_tree_partition.jl @@ -3,7 +3,7 @@ using DataGraphs: DataGraph, underlying_graph, vertex_data using Graphs: add_vertex!, vertices # Trigger package extension. using GraphsFlows: GraphsFlows -using ITensors: Index, ITensor, contract, noncommoninds, randomITensor +using ITensors: Index, ITensor, contract, noncommoninds, random_itensor using ITensorMPS: MPS using ITensorNetworks: _DensityMartrixAlgGraph, @@ -35,7 +35,7 @@ using Test: @test, @testset o = Index(2, "o") p = Index(2, "p") - T = randomITensor(i, j, k, l, m, n, o, p) + T = random_itensor(i, j, k, l, m, n, o, p) M = MPS(T, (i, j, k, l, m, n, o, p); cutoff=1e-5, maxdim=500) tn = ITensorNetwork(M[:]) for out in [binary_tree_structure(tn), path_graph_structure(tn)] @@ -75,7 +75,7 @@ end @testset "test partition with mincut_recursive_bisection alg of disconnected tn" begin inds = [Index(2, "$i") for i in 1:5] - tn = ITensorNetwork([randomITensor(i) for i in inds]) + tn = ITensorNetwork([random_itensor(i) for i in inds]) par = _partition(tn, binary_tree_structure(tn); alg="mincut_recursive_bisection") network = mapreduce(v -> collect(eachtensor(par[v])), vcat, vertices(par)) @test isapprox(contract(tn), contract(network)) @@ -88,7 +88,7 @@ end l = Index(2, "l") m = Index(2, "m") for dtype in (Float64, Complex{Float64}) - T = randomITensor(dtype, i, j, k, l, m) + T = random_itensor(dtype, i, j, k, l, m) M = MPS(T, (i, j, k, l, m); cutoff=1e-5, maxdim=5) network = M[:] out1 = contract(network...) @@ -126,7 +126,7 @@ end k = Index(2, "k") l = Index(2, "l") m = Index(2, "m") - T = randomITensor(i, j, k, l, m) + T = random_itensor(i, j, k, l, m) M = MPS(T, (i, j, k, l, m); cutoff=1e-5, maxdim=5) tn = ITensorNetwork(M[:]) out_tree = path_graph_structure(tn) diff --git a/test/test_contract_deltas.jl b/test/test_contract_deltas.jl index fa6ade3b..75e92870 100644 --- a/test/test_contract_deltas.jl +++ b/test/test_contract_deltas.jl @@ -2,7 +2,7 @@ using Graphs: dfs_tree, nv, vertices # Trigger package extension. 
using GraphsFlows: GraphsFlows -using ITensors: Index, ITensor, delta, noncommoninds, randomITensor +using ITensors: Index, ITensor, delta, noncommoninds, random_itensor using ITensorNetworks: IndsNetwork, ITensorNetwork, @@ -21,7 +21,7 @@ using Test: @test, @testset @testset "test _contract_deltas with no deltas" begin i = Index(2, "i") - t = randomITensor(i) + t = random_itensor(i) tn = _contract_deltas(ITensorNetwork([t])) @test tn[1] == t end diff --git a/test/test_forms.jl b/test/test_forms.jl index be0b52a3..41561f0b 100644 --- a/test/test_forms.jl +++ b/test/test_forms.jl @@ -20,7 +20,7 @@ using ITensorNetworks: tensornetwork, union_all_inds, update -using ITensors: contract, dag, inds, prime, randomITensor +using ITensors: contract, dag, inds, prime, random_itensor using LinearAlgebra: norm using Test: @test, @testset using Random: Random @@ -52,7 +52,7 @@ using Random: Random @test isempty(flatten_siteinds(qf)) v = (1, 1) - new_tensor = randomITensor(inds(ψket[v])) + new_tensor = random_itensor(inds(ψket[v])) qf_updated = update(qf, v, copy(new_tensor)) @test tensornetwork(qf_updated)[bra_vertex(qf_updated, v)] ≈ diff --git a/test/test_gauging.jl b/test/test_gauging.jl index 2c8b6f8a..8b328a69 100644 --- a/test/test_gauging.jl +++ b/test/test_gauging.jl @@ -9,7 +9,7 @@ using ITensorNetworks: random_tensornetwork, siteinds, update -using ITensors: diagITensor, inds, inner +using ITensors: diag_itensor, inds, inner using ITensors.NDTensors: vector using LinearAlgebra: diag using NamedGraphs.NamedGraphGenerators: named_grid @@ -43,7 +43,7 @@ using Test: @test, @testset #Test all message tensors are approximately diagonal even when we keep running BP bp_cache = update(bp_cache; maxiter=10) for m_e in values(messages(bp_cache)) - @test diagITensor(vector(diag(only(m_e))), inds(only(m_e))) ≈ only(m_e) atol = 1e-8 + @test diag_itensor(vector(diag(only(m_e))), inds(only(m_e))) ≈ only(m_e) atol = 1e-8 end end end diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl index 36d29657..ed4864b8 100644 --- a/test/test_itensornetwork.jl +++ b/test/test_itensornetwork.jl @@ -30,7 +30,7 @@ using ITensors: itensor, onehot, order, - randomITensor, + random_itensor, scalartype, sim, uniqueinds diff --git a/test/test_itensorsextensions.jl b/test/test_itensorsextensions.jl index b2438780..1a60b247 100644 --- a/test/test_itensorsextensions.jl +++ b/test/test_itensorsextensions.jl @@ -10,7 +10,7 @@ using ITensors: noprime, op, prime, - randomITensor, + random_itensor, replaceind, replaceinds, sim @@ -53,7 +53,7 @@ Random.seed!(1234) n in (2, 3, 5, 10) i, j = Index.(([QN() => n], [QN() => n])) - A = randomITensor(elt, i, j) + A = random_itensor(elt, i, j) P = A * prime(dag(A), i) sqrtP = map_eigvals(sqrt, P, i, i'; ishermitian=true) inv_P = dag(map_eigvals(inv, P, i, i'; ishermitian=true)) diff --git a/test/test_treetensornetworks/test_solvers/test_contract.jl b/test/test_treetensornetworks/test_solvers/test_contract.jl index 6c2c7e48..cc7dfaaf 100644 --- a/test/test_treetensornetworks/test_solvers/test_contract.jl +++ b/test/test_treetensornetworks/test_solvers/test_contract.jl @@ -150,11 +150,11 @@ end nbit = 3 sites = siteinds("Qubit", nbit) - # randomMPO does not support linkdims keyword. + # random_mpo does not support linkdims keyword. 
M1 = replaceprime( - ITensorMPS.randomMPO(sites) + ITensorMPS.randomMPO(sites), 1 => 2, 0 => 1 + ITensorMPS.random_mpo(sites) + ITensorMPS.random_mpo(sites), 1 => 2, 0 => 1 ) - M2 = ITensorMPS.randomMPO(sites) + ITensorMPS.randomMPO(sites) + M2 = ITensorMPS.random_mpo(sites) + ITensorMPS.random_mpo(sites) M12_ref = contract(M1, M2; alg="naive") t12_ref = ttn([M12_ref[v] for v in eachindex(M12_ref)]) diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg.jl b/test/test_treetensornetworks/test_solvers/test_dmrg.jl index 881ffc5b..2f638e90 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg.jl @@ -207,7 +207,7 @@ end vmap = Dictionary(collect(vertices(s))[linear_order], 1:length(linear_order)) sline = only.(collect(vertex_data(s)))[linear_order] Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], os), sline) - psiline = ITensorMPS.randomMPS(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) + psiline = ITensorMPS.random_mps(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) e2, psi2 = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) @test inner(psi', H, psi) ≈ inner(psi2', Hline, psi2) atol = 1e-5 @@ -242,7 +242,7 @@ end # get MPS / MPO with JW string result ITensors.disable_auto_fermion() Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], os), sline) - psiline = ITensorMPS.randomMPS(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) + psiline = ITensorMPS.random_mps(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) e_jw, psi_jw = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) ITensors.enable_auto_fermion() @@ -261,7 +261,7 @@ end # Compare to `ITensors.MPO` version of `dmrg` Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], os), sline) - psiline = ITensorMPS.randomMPS(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) + psiline = ITensorMPS.random_mps(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) e2, psi2 = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) @test inner(psi', H, psi) ≈ inner(psi2', Hline, psi2) atol = 1e-5 diff --git a/test/test_ttno.jl b/test/test_ttno.jl index 95118ba1..3a5933ef 100644 --- a/test/test_ttno.jl +++ b/test/test_ttno.jl @@ -1,7 +1,7 @@ @eval module $(gensym()) using Graphs: vertices using ITensorNetworks: ttn, contract, ortho_region, siteinds, union_all_inds -using ITensors: @disable_warn_order, prime, randomITensor +using ITensors: @disable_warn_order, prime, random_itensor using LinearAlgebra: norm using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: shuffle @@ -24,7 +24,7 @@ using Test: @test, @testset cutoff = 1e-10 sites_o = [is_isp[v] for v in vertex_order] # create random ITensor with these indices - O = randomITensor(sites_o...) + O = random_itensor(sites_o...) 
# dense TTN constructor from IndsNetwork @disable_warn_order o1 = ttn(O, is_isp; cutoff) root_vertex = only(ortho_region(o1)) diff --git a/test/test_ttns.jl b/test/test_ttns.jl index 0a06e6e8..efe1edcc 100644 --- a/test/test_ttns.jl +++ b/test/test_ttns.jl @@ -2,7 +2,7 @@ using DataGraphs: vertex_data using Graphs: vertices using ITensorNetworks: ttn, contract, ortho_region, siteinds -using ITensors: @disable_warn_order, randomITensor +using ITensors: @disable_warn_order, random_itensor using LinearAlgebra: norm using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: shuffle @@ -22,7 +22,7 @@ using Test: @test, @testset @testset "Construct TTN from ITensor or Array" begin cutoff = 1e-10 # create random ITensor with these indices - S = randomITensor(vertex_data(is)...) + S = random_itensor(vertex_data(is)...) # dense TTN constructor from IndsNetwork @disable_warn_order s1 = ttn(S, is; cutoff) root_vertex = only(ortho_region(s1)) From de7d4dd9e981d39d9cc0866301aaaa55f4b8eb53 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Fri, 17 May 2024 11:05:13 -0400 Subject: [PATCH 04/11] Add support for specifying RNGs in tensor network constructors, use StableRNGs.jl in tests (#184) --- Project.toml | 2 +- TODO.md | 28 ----------- src/specialitensornetworks.jl | 47 +++++++++++++++---- test/Project.toml | 1 + test/test_additensornetworks.jl | 9 ++-- test/test_apply.jl | 15 ++---- test/test_belief_propagation.jl | 17 +++---- test/test_binary_tree_partition.jl | 4 +- test/test_contract_deltas.jl | 4 +- test/test_contraction_sequence.jl | 7 ++- test/test_contraction_sequence_to_graph.jl | 10 ++-- test/test_expect.jl | 12 ++--- test/test_forms.jl | 14 +++--- test/test_gauging.jl | 6 +-- test/test_indsnetwork.jl | 46 +++++++++--------- test/test_inner.jl | 10 ++-- test/test_itensornetwork.jl | 46 ++++++++---------- test/test_itensorsextensions.jl | 10 ++-- test/test_treetensornetworks/test_expect.jl | 6 +-- .../test_solvers/Project.toml | 1 + .../test_solvers/test_contract.jl | 13 +++-- .../test_solvers/test_dmrg.jl | 28 +++++++---- .../test_solvers/test_dmrg_x.jl | 34 ++++---------- .../test_solvers/test_linsolve.jl | 12 ++--- .../test_solvers/test_tdvp.jl | 23 +++++---- test/test_ttno.jl | 15 +++--- test/test_ttns.jl | 13 ++--- 27 files changed, 208 insertions(+), 225 deletions(-) delete mode 100644 TODO.md diff --git a/Project.toml b/Project.toml index 1aa4351d..2765a618 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.9" +version = "0.11.10" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" diff --git a/TODO.md b/TODO.md deleted file mode 100644 index a45086b5..00000000 --- a/TODO.md +++ /dev/null @@ -1,28 +0,0 @@ -# TODO - -## Missing functions - - - `Graphs.connected_components(::ITensorNetwork)` - - Shortest path algorithms: `Graphs.a_star`. 
- -## Contraction sequence optimization - - - https://arxiv.org/abs/1304.6112 (Faster identification of optimal contraction sequences for tensor networks) - - https://arxiv.org/abs/1310.8023 (Improving the efficiency of variational tensor network algorithms) - - https://arxiv.org/abs/quant-ph/0511069 (Simulating quantum computation by contracting tensor networks) - - https://journals.aps.org/pre/abstract/10.1103/PhysRevE.100.043309 (Towards a polynomial algorithm for optimal contraction sequence of tensor networks from trees) - - https://arxiv.org/abs/2001.08063 (Algorithms for Tensor Network Contraction Ordering) - - https://arxiv.org/abs/2002.01935 (Hyper-optimized tensor network contraction) - -## Tensor network truncation and contraction - - https://arxiv.org/abs/1709.07460 (Renormalization of tensor networks using graph independent local truncations) - - https://arxiv.org/abs/1801.05390 (Gauge fixing, canonical forms and optimal truncations in tensor networks with closed loops) - - https://arxiv.org/abs/1912.03014 (Contracting Arbitrary Tensor Networks: General Approximate Algorithm and Applications in Graphical Models and Quantum Circuit Simulations) - -## Tensor network optimization - - https://arxiv.org/abs/1903.09650 (Differentiable Programming Tensor Networks) - - https://arxiv.org/abs/1912.02780 (Automatic Differentiation for Second Renormalization of Tensor Networks) - - https://arxiv.org/abs/2001.04121 (Automatic differentiation of dominant eigensolver and its applications in quantum physics) - - https://arxiv.org/abs/2007.03638 (Riemannian optimization of isometric tensor networks) - - https://arxiv.org/abs/2101.03935 (Generating Function for Tensor Network Diagrammatic Summation) - diff --git a/src/specialitensornetworks.jl b/src/specialitensornetworks.jl index d36d4778..f0c4f26f 100644 --- a/src/specialitensornetworks.jl +++ b/src/specialitensornetworks.jl @@ -2,6 +2,7 @@ using ITensors: delta using ITensors.NDTensors: dim using DataGraphs: IsUnderlyingGraph using Distributions: Distribution +using Random: Random, AbstractRNG """ RETURN A TENSOR NETWORK WITH COPY TENSORS ON EACH VERTEX. @@ -28,22 +29,40 @@ end """ Build an ITensor network on a graph specified by the inds network s. Bond_dim is given by link_space and entries are randomised (normal distribution, mean 0 std 1) """ -function random_tensornetwork(eltype::Type, s::IndsNetwork; kwargs...) +function random_tensornetwork(rng::AbstractRNG, eltype::Type, s::IndsNetwork; kwargs...) return ITensorNetwork(s; kwargs...) do v - return inds -> itensor(randn(eltype, dim.(inds)...), inds) + return inds -> itensor(randn(rng, eltype, dim.(inds)...), inds) end end +function random_tensornetwork(eltype::Type, s::IndsNetwork; kwargs...) + return random_tensornetwork(Random.default_rng(), eltype, s; kwargs...) +end + +function random_tensornetwork(rng::AbstractRNG, s::IndsNetwork; kwargs...) + return random_tensornetwork(rng, Float64, s; kwargs...) +end + function random_tensornetwork(s::IndsNetwork; kwargs...) - return random_tensornetwork(Float64, s; kwargs...) + return random_tensornetwork(Random.default_rng(), s; kwargs...) +end + +@traitfn function random_tensornetwork( + rng::AbstractRNG, eltype::Type, g::::IsUnderlyingGraph; kwargs... +) + return random_tensornetwork(rng, eltype, IndsNetwork(g); kwargs...) end @traitfn function random_tensornetwork(eltype::Type, g::::IsUnderlyingGraph; kwargs...) - return random_tensornetwork(eltype, IndsNetwork(g); kwargs...) 
+ return random_tensornetwork(Random.default_rng(), eltype, g; kwargs...) +end + +@traitfn function random_tensornetwork(rng::AbstractRNG, g::::IsUnderlyingGraph; kwargs...) + return random_tensornetwork(rng, Float64, g; kwargs...) end @traitfn function random_tensornetwork(g::::IsUnderlyingGraph; kwargs...) - return random_tensornetwork(Float64, IndsNetwork(g); kwargs...) + return random_tensornetwork(Random.default_rng(), g; kwargs...) end """ @@ -51,14 +70,26 @@ Build an ITensor network on a graph specified by the inds network s. Bond_dim is given by link_space and entries are randomized. The random distribution is based on the input argument `distribution`. """ -function random_tensornetwork(distribution::Distribution, s::IndsNetwork; kwargs...) +function random_tensornetwork( + rng::AbstractRNG, distribution::Distribution, s::IndsNetwork; kwargs... +) return ITensorNetwork(s; kwargs...) do v - return inds -> itensor(rand(distribution, dim.(inds)...), inds) + return inds -> itensor(rand(rng, distribution, dim.(inds)...), inds) end end +function random_tensornetwork(distribution::Distribution, s::IndsNetwork; kwargs...) + return random_tensornetwork(Random.default_rng(), distribution, s; kwargs...) +end + +@traitfn function random_tensornetwork( + rng::AbstractRNG, distribution::Distribution, g::::IsUnderlyingGraph; kwargs... +) + return random_tensornetwork(rng, distribution, IndsNetwork(g); kwargs...) +end + @traitfn function random_tensornetwork( distribution::Distribution, g::::IsUnderlyingGraph; kwargs... ) - return random_tensornetwork(distribution, IndsNetwork(g); kwargs...) + return random_tensornetwork(Random.default_rng(), distribution, g; kwargs...) end diff --git a/test/Project.toml b/test/Project.toml index 0ea7fb5d..3dd73b41 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -24,6 +24,7 @@ OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228" diff --git a/test/test_additensornetworks.jl b/test/test_additensornetworks.jl index 88b12a1d..a2176051 100644 --- a/test/test_additensornetworks.jl +++ b/test/test_additensornetworks.jl @@ -5,11 +5,9 @@ using NamedGraphs.NamedGraphGenerators: named_grid using ITensorNetworks: ITensorNetwork, inner_network, random_tensornetwork, siteinds using ITensors: ITensors, apply, op, scalar, inner using LinearAlgebra: norm_sqr -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset - @testset "add_itensornetworks" begin - Random.seed!(5623) g = named_grid((2, 2)) s = siteinds("S=1/2", g) ψ1 = ITensorNetwork(v -> "↑", s) @@ -32,8 +30,9 @@ using Test: @test, @testset rem_edge!(s2, NamedEdge((1, 1) => (1, 2))) v = rand(vertices(g)) - ψ1 = random_tensornetwork(s1; link_space=χ) - ψ2 = random_tensornetwork(s2; link_space=χ) + rng = StableRNG(1234) + ψ1 = random_tensornetwork(rng, s1; link_space=χ) + ψ2 = random_tensornetwork(rng, s2; link_space=χ) ψ12 = ψ1 + ψ2 diff --git a/test/test_apply.jl b/test/test_apply.jl index ef0a8fdc..18d4f590 100644 --- a/test/test_apply.jl +++ b/test/test_apply.jl @@ -14,39 +14,32 @@ using ITensorNetworks: using ITensors: ITensors, inner, op using NamedGraphs.NamedGraphGenerators: named_grid using NamedGraphs.PartitionedGraphs: 
PartitionVertex -using Random: Random using SplitApplyCombine: group +using StableRNGs: StableRNG using Test: @test, @testset - @testset "apply" begin - Random.seed!(5623) g_dims = (2, 2) n = prod(g_dims) g = named_grid(g_dims) s = siteinds("S=1/2", g) χ = 2 - ψ = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=χ) v1, v2 = (2, 2), (1, 2) ψψ = norm_sqr_network(ψ) #Simple Belief Propagation Grouping bp_cache = BeliefPropagationCache(ψψ, group(v -> v[1], vertices(ψψ))) bp_cache = update(bp_cache; maxiter=20) envsSBP = environment(bp_cache, PartitionVertex.([v1, v2])) - ψv = VidalITensorNetwork(ψ) - #This grouping will correspond to calculating the environments exactly (each column of the grid is a partition) bp_cache = BeliefPropagationCache(ψψ, group(v -> v[1][1], vertices(ψψ))) bp_cache = update(bp_cache; maxiter=20) envsGBP = environment(bp_cache, [(v1, "bra"), (v1, "ket"), (v2, "bra"), (v2, "ket")]) - inner_alg = "exact" - ngates = 5 - for i in 1:ngates o = op("RandomUnitary", s[v1]..., s[v2]...) - ψOexact = apply(o, ψ; cutoff=1e-16) ψOSBP = apply( o, @@ -79,9 +72,7 @@ using Test: @test, @testset fGBP = inner(ψOGBP, ψOexact; alg=inner_alg) / sqrt(inner(ψOexact, ψOexact; alg=inner_alg) * inner(ψOGBP, ψOGBP; alg=inner_alg)) - @test real(fGBP * conj(fGBP)) >= real(fSBP * conj(fSBP)) - @test isapprox(real(fSBP * conj(fSBP)), real(fVidal * conj(fVidal)); atol=1e-3) end end diff --git a/test/test_belief_propagation.jl b/test/test_belief_propagation.jl index f960bb8b..728b06d6 100644 --- a/test/test_belief_propagation.jl +++ b/test/test_belief_propagation.jl @@ -31,31 +31,27 @@ using LinearAlgebra: eigvals, tr using NamedGraphs: NamedEdge, NamedGraph using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid using NamedGraphs.PartitionedGraphs: PartitionVertex, partitionedges -using Random: Random using SplitApplyCombine: group +using StableRNGs: StableRNG using Test: @test, @testset - @testset "belief_propagation" begin ITensors.disable_warn_order() - g = named_grid((3, 3)) s = siteinds("S=1/2", g) χ = 2 - Random.seed!(1234) - ψ = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=χ) ψψ = ψ ⊗ prime(dag(ψ); sites=[]) - bpc = BeliefPropagationCache(ψψ) bpc = update(bpc; maxiter=50, tol=1e-10) - #Test messages are converged for pe in partitionedges(partitioned_tensornetwork(bpc)) @test update_message(bpc, pe) ≈ message(bpc, pe) atol = 1e-8 end - #Test updating the underlying tensornetwork in the cache v = first(vertices(ψψ)) - new_tensor = random_itensor(inds(ψψ[v])) + rng = StableRNG(1234) + new_tensor = random_itensor(rng, inds(ψψ[v])) bpc_updated = update_factor(bpc, v, new_tensor) ψψ_updated = tensornetwork(bpc_updated) @test ψψ_updated[v] == new_tensor @@ -79,7 +75,8 @@ using Test: @test, @testset #Test edge case of network which evalutes to 0 χ = 2 g = named_grid((3, 1)) - ψ = random_tensornetwork(ComplexF64, g; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, ComplexF64, g; link_space=χ) ψ[(1, 1)] = 0.0 * ψ[(1, 1)] @test iszero(scalar(ψ; alg="bp")) end diff --git a/test/test_binary_tree_partition.jl b/test/test_binary_tree_partition.jl index 7b2b8050..2dc8d13a 100644 --- a/test/test_binary_tree_partition.jl +++ b/test/test_binary_tree_partition.jl @@ -23,6 +23,7 @@ using NamedGraphs.NamedGraphGenerators: named_grid using NamedGraphs.GraphsExtensions: is_binary_arborescence, post_order_dfs_vertices, root_vertex using OMEinsumContractionOrders: 
OMEinsumContractionOrders +using StableRNGs: StableRNG using Test: @test, @testset @testset "test mincut functions on top of MPS" begin @@ -60,7 +61,8 @@ end @testset "test _binary_tree_partition_inds of a 2D network" begin N = (3, 3, 3) linkdim = 2 - network = random_tensornetwork(IndsNetwork(named_grid(N)); link_space=linkdim) + rng = StableRNG(1234) + network = random_tensornetwork(rng, IndsNetwork(named_grid(N)); link_space=linkdim) tn = Array{ITensor,length(N)}(undef, N...) for v in vertices(network) tn[v...] = network[v...] diff --git a/test/test_contract_deltas.jl b/test/test_contract_deltas.jl index 75e92870..93c8d768 100644 --- a/test/test_contract_deltas.jl +++ b/test/test_contract_deltas.jl @@ -17,6 +17,7 @@ using ITensorNetworks: random_tensornetwork using NamedGraphs.GraphsExtensions: leaf_vertices, root_vertex using NamedGraphs.NamedGraphGenerators: named_grid +using StableRNGs: StableRNG using Test: @test, @testset @testset "test _contract_deltas with no deltas" begin @@ -41,7 +42,8 @@ end @testset "test _contract_deltas over partition" begin N = (3, 3, 3) linkdim = 2 - network = random_tensornetwork(IndsNetwork(named_grid(N)); link_space=linkdim) + rng = StableRNG(1234) + network = random_tensornetwork(rng, IndsNetwork(named_grid(N)); link_space=linkdim) tn = Array{ITensor,length(N)}(undef, N...) for v in vertices(network) tn[v...] = network[v...] diff --git a/test/test_contraction_sequence.jl b/test/test_contraction_sequence.jl index 40ea6fc0..e54d2a48 100644 --- a/test/test_contraction_sequence.jl +++ b/test/test_contraction_sequence.jl @@ -5,17 +5,16 @@ using ITensorNetworks: using ITensors: ITensors, contract using NamedGraphs.NamedGraphGenerators: named_grid using OMEinsumContractionOrders: OMEinsumContractionOrders -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset - -Random.seed!(1234) @testset "contraction_sequence" begin ITensors.@disable_warn_order begin dims = (2, 3) g = named_grid(dims) s = siteinds("S=1/2", g) χ = 10 - ψ = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=χ) tn = norm_sqr_network(ψ) seq_optimal = contraction_sequence(tn; alg="optimal") res_optimal = contract(tn; sequence=seq_optimal)[] diff --git a/test/test_contraction_sequence_to_graph.jl b/test/test_contraction_sequence_to_graph.jl index ca59e720..11c174db 100644 --- a/test/test_contraction_sequence_to_graph.jl +++ b/test/test_contraction_sequence_to_graph.jl @@ -6,20 +6,18 @@ using ITensorNetworks: contraction_sequence_to_graph, contraction_tree_leaf_bipartition, random_tensornetwork -using Test: @test, @testset using NamedGraphs.GraphsExtensions: is_leaf_vertex, leaf_vertices, non_leaf_edges, root_vertex using NamedGraphs.NamedGraphGenerators: named_grid - +using StableRNGs: StableRNG +using Test: @test, @testset @testset "contraction_sequence_to_graph" begin n = 3 dims = (n, n) g = named_grid(dims) - - tn = random_tensornetwork(g; link_space=2) - + rng = StableRNG(1234) + tn = random_tensornetwork(rng, g; link_space=2) seq = contraction_sequence(tn) - g_directed_seq = contraction_sequence_to_digraph(seq) g_seq_leaves = leaf_vertices(g_directed_seq) @test length(g_seq_leaves) == n * n diff --git a/test/test_expect.jl b/test/test_expect.jl index db7234e0..75ed8504 100644 --- a/test/test_expect.jl +++ b/test/test_expect.jl @@ -1,5 +1,4 @@ @eval module $(gensym()) - using Graphs: SimpleGraph, uniform_tree using NamedGraphs: NamedGraph, vertices using NamedGraphs.NamedGraphGenerators: named_grid @@ -10,18 
+9,16 @@ using ITensorNetworks: expect, random_tensornetwork, original_state_vertex -using Random: Random using SplitApplyCombine: group +using StableRNGs: StableRNG using Test: @test, @testset - @testset "Test Expect" begin - Random.seed!(1234) - #Test on a tree L, χ = 4, 2 g = NamedGraph(SimpleGraph(uniform_tree(L))) s = siteinds("S=1/2", g) - ψ = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=χ) sz_bp = expect(ψ, "Sz"; alg="bp") sz_exact = expect(ψ, "Sz"; alg="exact") @test sz_bp ≈ sz_exact @@ -30,7 +27,8 @@ using Test: @test, @testset L, χ = 2, 2 g = named_grid((L, L)) s = siteinds("S=1/2", g) - ψ = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=χ) cache_construction_function = f -> BeliefPropagationCache( f; partitioned_vertices=group(v -> (original_state_vertex(f, v)[1]), vertices(f)) diff --git a/test/test_forms.jl b/test/test_forms.jl index 41561f0b..a58822e5 100644 --- a/test/test_forms.jl +++ b/test/test_forms.jl @@ -22,18 +22,17 @@ using ITensorNetworks: update using ITensors: contract, dag, inds, prime, random_itensor using LinearAlgebra: norm +using StableRNGs: StableRNG using Test: @test, @testset -using Random: Random - @testset "FormNetworks" begin g = named_grid((1, 4)) s = siteinds("S=1/2", g) s_operator = union_all_inds(s, prime(s)) χ, D = 2, 3 - Random.seed!(1234) - ψket = random_tensornetwork(s; link_space=χ) - ψbra = random_tensornetwork(s; link_space=χ) - A = random_tensornetwork(s_operator; link_space=D) + rng = StableRNG(1234) + ψket = random_tensornetwork(rng, s; link_space=χ) + ψbra = random_tensornetwork(rng, s; link_space=χ) + A = random_tensornetwork(rng, s_operator; link_space=D) blf = BilinearFormNetwork(A, ψbra, ψket) @test nv(blf) == nv(ψket) + nv(ψbra) + nv(A) @@ -52,7 +51,8 @@ using Random: Random @test isempty(flatten_siteinds(qf)) v = (1, 1) - new_tensor = random_itensor(inds(ψket[v])) + rng = StableRNG(1234) + new_tensor = random_itensor(rng, inds(ψket[v])) qf_updated = update(qf, v, copy(new_tensor)) @test tensornetwork(qf_updated)[bra_vertex(qf_updated, v)] ≈ diff --git a/test/test_gauging.jl b/test/test_gauging.jl index 8b328a69..9eb4ebbf 100644 --- a/test/test_gauging.jl +++ b/test/test_gauging.jl @@ -13,7 +13,7 @@ using ITensors: diag_itensor, inds, inner using ITensors.NDTensors: vector using LinearAlgebra: diag using NamedGraphs.NamedGraphGenerators: named_grid -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset @testset "gauging" begin @@ -23,8 +23,8 @@ using Test: @test, @testset s = siteinds("S=1/2", g) χ = 6 - Random.seed!(5467) - ψ = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=χ) # Move directly to vidal gauge ψ_vidal = VidalITensorNetwork( diff --git a/test/test_indsnetwork.jl b/test/test_indsnetwork.jl index acfee2e6..21fdb3f8 100644 --- a/test/test_indsnetwork.jl +++ b/test/test_indsnetwork.jl @@ -6,45 +6,35 @@ using ITensorNetworks: IndsNetwork, union_all_inds using ITensors: Index using ITensors.NDTensors: dim using NamedGraphs.NamedGraphGenerators: named_comb_tree -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset - @testset "IndsNetwork constructors" begin - Random.seed!(1234) - # test on comb tree dims = (3, 2) c = named_comb_tree(dims) - ## specify some site and link indices in different ways - # one index per site - site_dims = [rand(2:6) for _ in 1:nv(c)] + rng = StableRNG(1234) + site_dims = 
[rand(rng, 2:6) for _ in 1:nv(c)] site_inds = Index.(site_dims) - # multiple indices per site - site_dims_multi = [[rand(2:6) for ni in 1:rand(1:3)] for _ in 1:nv(c)] + site_dims_multi = [[rand(rng, 2:6) for ni in 1:rand(rng, 1:3)] for _ in 1:nv(c)] site_inds_multi = map(x -> Index.(x), site_dims_multi) - # one index per link - link_dims = [rand(2:6) for _ in 1:ne(c)] + link_dims = [rand(rng, 2:6) for _ in 1:ne(c)] link_inds = Index.(link_dims) - # multiple indices per link - link_dims_multi = [[rand(2:6) for ni in 1:rand(1:3)] for _ in 1:ne(c)] + link_dims_multi = [[rand(rng, 2:6) for ni in 1:rand(rng, 1:3)] for _ in 1:ne(c)] link_inds_multi = map(x -> Index.(x), link_dims_multi) - # TODO: fix ambiguity due to vectors of QNBlocks... # test constructors - ## empty constructor is_emtpy = IndsNetwork(c) @test is_emtpy isa IndsNetwork @test isempty(vertex_data(is_emtpy)) && isempty(edge_data(is_emtpy)) - ## specify site and/or link spaces uniformly - uniform_dim = rand(2:6) - uniform_dim_multi = [rand(2:6) for _ in 1:rand(2:4)] + uniform_dim = rand(rng, 2:6) + uniform_dim_multi = [rand(rng, 2:6) for _ in 1:rand(rng, 2:4)] # only initialize sites is_usite = IndsNetwork(c; site_space=uniform_dim) @test is_usite isa IndsNetwork @@ -140,22 +130,28 @@ using Test: @test, @testset end @testset "IndsNetwork merging" begin - Random.seed!(1234) - # test on comb tree dims = (3, 2) c = named_comb_tree(dims) - site_dims1 = Dictionary(vertices(c), [[rand(2:6) for ni in 1:rand(1:3)] for _ in 1:nv(c)]) - site_dims2 = Dictionary(vertices(c), [[rand(2:6) for ni in 1:rand(1:3)] for _ in 1:nv(c)]) - link_dims1 = Dictionary(edges(c), [[rand(2:6) for ni in 1:rand(1:3)] for _ in 1:ne(c)]) - link_dims2 = Dictionary(edges(c), [[rand(2:6) for ni in 1:rand(1:3)] for _ in 1:ne(c)]) + rng = StableRNG(1234) + site_dims1 = Dictionary( + vertices(c), [[rand(rng, 2:6) for ni in 1:rand(rng, 1:3)] for _ in 1:nv(c)] + ) + site_dims2 = Dictionary( + vertices(c), [[rand(rng, 2:6) for ni in 1:rand(rng, 1:3)] for _ in 1:nv(c)] + ) + link_dims1 = Dictionary( + edges(c), [[rand(rng, 2:6) for ni in 1:rand(rng, 1:3)] for _ in 1:ne(c)] + ) + link_dims2 = Dictionary( + edges(c), [[rand(rng, 2:6) for ni in 1:rand(rng, 1:3)] for _ in 1:ne(c)] + ) is1_s = IndsNetwork(c; site_space=site_dims1) is2_s = IndsNetwork(c; site_space=site_dims2) is1_e = IndsNetwork(c; link_space=link_dims1) is2_e = IndsNetwork(c; link_space=link_dims2) is1 = IndsNetwork(c; site_space=site_dims1, link_space=link_dims1) is2 = IndsNetwork(c; site_space=site_dims2, link_space=link_dims2) - # merge some networks is1_m = union_all_inds(is1_s, is1_e) @test dim.(vertex_data(is1_m)) == dim.(vertex_data(is1)) diff --git a/test/test_inner.jl b/test/test_inner.jl index 427fadf0..b76da5ed 100644 --- a/test/test_inner.jl +++ b/test/test_inner.jl @@ -1,5 +1,4 @@ @eval module $(gensym()) - using ITensorNetworks: ITensorNetwork, inner, @@ -15,17 +14,16 @@ using ITensors: dag, siteinds using SplitApplyCombine: group using Graphs: SimpleGraph, uniform_tree using NamedGraphs: NamedGraph -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset - @testset "Inner products, BP vs exact comparison" begin - Random.seed!(1234) L = 4 χ = 2 g = NamedGraph(SimpleGraph(uniform_tree(L))) s = siteinds("S=1/2", g) - y = random_tensornetwork(s; link_space=χ) - x = random_tensornetwork(s; link_space=χ) + rng = StableRNG(1234) + y = random_tensornetwork(rng, s; link_space=χ) + x = random_tensornetwork(rng, s; link_space=χ) #First lets do it with the flattened version of the 
network xy = inner_network(x, y) diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl index ed4864b8..7e97c6a3 100644 --- a/test/test_itensornetwork.jl +++ b/test/test_itensornetwork.jl @@ -57,24 +57,19 @@ using NamedGraphs: NamedEdge using NamedGraphs.GraphsExtensions: incident_edges using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid using NDTensors: NDTensors, dim -using Random: Random, randn! +using Random: randn! +using StableRNGs: StableRNG using Test: @test, @test_broken, @testset - const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) - @testset "ITensorNetwork tests" begin @testset "ITensorNetwork Basics" begin - Random.seed!(1234) g = named_grid((4,)) s = siteinds("S=1/2", g) - @test s isa IndsNetwork @test nv(s) == 4 @test ne(s) == 3 @test neighbors(s, (2,)) == [(1,), (3,)] - tn = ITensorNetwork(s; link_space=2) - @test nv(tn) == 4 @test ne(tn) == 3 @test tn isa ITensorNetwork @@ -100,9 +95,9 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test_broken tn[1:2] isa ITensorNetwork # TODO: Support this syntax, maybe rename `subgraph`. @test_broken induced_subgraph(tn, [(1,), (2,)]) isa ITensorNetwork - + rng = StableRNG(1234) for v in vertices(tn) - tn[v] = randn!(tn[v]) + tn[v] = randn!(rng, tn[v]) end tn′ = sim(dag(tn); sites=[]) @@ -175,17 +170,21 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) siteinds("S=1/2", named_grid((3, 3))), ) + rng = StableRNG(1234) ψ = ITensorNetwork(g; kwargs...) do v - return inds -> itensor(randn(elt, dim.(inds)...), inds) + return inds -> itensor(randn(rng, elt, dim.(inds)...), inds) end @test eltype(ψ[first(vertices(ψ))]) == elt + rng = StableRNG(1234) ψ = ITensorNetwork(g; kwargs...) do v - return inds -> itensor(randn(dim.(inds)...), inds) + return inds -> itensor(randn(rng, dim.(inds)...), inds) end @test eltype(ψ[first(vertices(ψ))]) == Float64 - ψ = random_tensornetwork(elt, g; kwargs...) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, elt, g; kwargs...) @test eltype(ψ[first(vertices(ψ))]) == elt - ψ = random_tensornetwork(g; kwargs...) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, g; kwargs...) @test eltype(ψ[first(vertices(ψ))]) == Float64 ψ = ITensorNetwork(elt, undef, g; kwargs...) @test eltype(ψ[first(vertices(ψ))]) == elt @@ -259,27 +258,24 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) end end end - @testset "random_tensornetwork with custom distributions" begin distribution = Uniform(-1.0, 1.0) - tn = random_tensornetwork(distribution, named_grid(4); link_space=2) + rng = StableRNG(1234) + tn = random_tensornetwork(rng, distribution, named_grid(4); link_space=2) # Note: distributions in package `Distributions` currently doesn't support customized # eltype, and all elements have type `Float64` @test eltype(tn[first(vertices(tn))]) == Float64 end - @testset "orthogonalize" begin - tn = random_tensornetwork(named_grid(4); link_space=2) + rng = StableRNG(1234) + tn = random_tensornetwork(rng, named_grid(4); link_space=2) Z = norm_sqr(tn) - tn_ortho = factorize(tn, 4 => 3) - # TODO: Error here in arranging the edges. Arrange by hash? 
Z̃ = norm_sqr(tn_ortho) @test nv(tn_ortho) == 5 @test nv(tn) == 4 @test Z ≈ Z̃ - tn_ortho = orthogonalize(tn, 4 => 3) Z̃ = norm_sqr(tn_ortho) @test nv(tn_ortho) == 4 @@ -353,9 +349,9 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) dims = (2, 2) g = named_grid(dims) s = siteinds("S=1/2", g) - ψ = random_tensornetwork(s; link_space=2) + rng = StableRNG(1234) + ψ = random_tensornetwork(rng, s; link_space=2) @test scalartype(ψ) == Float64 - ϕ = NDTensors.convert_scalartype(new_eltype, ψ) @test scalartype(ϕ) == new_eltype end @@ -365,7 +361,6 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) g = named_grid(dims) s = siteinds("S=1/2", g) state_map(v::Tuple) = iseven(sum(isodd.(v))) ? "↑" : "↓" - ψ = ITensorNetwork(state_map, s) t = ψ[2, 2] si = only(siteinds(ψ, (2, 2))) @@ -373,7 +368,6 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test eltype(t) == Float64 @test abs(t[si => "↑", [b => end for b in bi]...]) == 1.0 # insert_links introduces extra signs through factorization... @test t[si => "↓", [b => end for b in bi]...] == 0.0 - ϕ = ITensorNetwork(elt, state_map, s) t = ϕ[2, 2] si = only(siteinds(ϕ, (2, 2))) @@ -385,11 +379,11 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @testset "Priming and tagging" begin # TODO: add actual tests - tooth_lengths = fill(2, 3) c = named_comb_tree(tooth_lengths) is = siteinds("S=1/2", c) - tn = random_tensornetwork(is; link_space=3) + rng = StableRNG(1234) + tn = random_tensornetwork(rng, is; link_space=3) @test_broken swapprime(tn, 0, 2) end end diff --git a/test/test_itensorsextensions.jl b/test/test_itensorsextensions.jl index 1a60b247..cb4b5ed0 100644 --- a/test/test_itensorsextensions.jl +++ b/test/test_itensorsextensions.jl @@ -15,10 +15,8 @@ using ITensors: replaceinds, sim using ITensorNetworks.ITensorsExtensions: map_eigvals -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset - -Random.seed!(1234) @testset "ITensorsExtensions" begin @testset "Test map eigvals without QNS (eltype=$elt, dim=$n)" for elt in ( Float32, Float64, Complex{Float32}, Complex{Float64} @@ -27,7 +25,8 @@ Random.seed!(1234) i, j = Index(n, "i"), Index(n, "j") linds, rinds = Index[i], Index[j] - A = randn(elt, (n, n)) + rng = StableRNG(1234) + A = randn(rng, elt, (n, n)) A = A * A' P = ITensor(A, i, j) sqrtP = map_eigvals(sqrt, P, linds, rinds; ishermitian=true) @@ -53,7 +52,8 @@ Random.seed!(1234) n in (2, 3, 5, 10) i, j = Index.(([QN() => n], [QN() => n])) - A = random_itensor(elt, i, j) + rng = StableRNG(1234) + A = random_itensor(rng, elt, i, j) P = A * prime(dag(A), i) sqrtP = map_eigvals(sqrt, P, i, i'; ishermitian=true) inv_P = dag(map_eigvals(inv, P, i, i'; ishermitian=true)) diff --git a/test/test_treetensornetworks/test_expect.jl b/test/test_treetensornetworks/test_expect.jl index 48ce2aa9..9bf55248 100644 --- a/test/test_treetensornetworks/test_expect.jl +++ b/test/test_treetensornetworks/test_expect.jl @@ -4,12 +4,13 @@ using ITensorMPS: MPS using ITensorNetworks: ttn, expect, random_mps, siteinds using LinearAlgebra: norm using NamedGraphs.NamedGraphGenerators: named_comb_tree +using StableRNGs: StableRNG using Test: @test, @testset - @testset "MPS expect comparison with ITensors" begin N = 4 s = siteinds("S=1/2", N) - a = random_mps(s; link_space=100) + rng = StableRNG(1234) + a = random_mps(rng, s; link_space=100) @test norm(a) ≈ 1 b = MPS([a[v] for v in vertices(a)]) res_a = expect("Sz", a) @@ -17,7 +18,6 @@ using Test: @test, @testset res_a = 
[res_a[v] for v in vertices(a)] @test res_a ≈ res_b rtol = 1e-6 end - @testset "TTN expect" begin tooth_lengths = fill(2, 2) c = named_comb_tree(tooth_lengths) diff --git a/test/test_treetensornetworks/test_solvers/Project.toml b/test/test_treetensornetworks/test_solvers/Project.toml index c377be4d..77225041 100644 --- a/test/test_treetensornetworks/test_solvers/Project.toml +++ b/test/test_treetensornetworks/test_solvers/Project.toml @@ -9,4 +9,5 @@ NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/test_treetensornetworks/test_solvers/test_contract.jl b/test/test_treetensornetworks/test_solvers/test_contract.jl index cc7dfaaf..8255eaec 100644 --- a/test/test_treetensornetworks/test_solvers/test_contract.jl +++ b/test/test_treetensornetworks/test_solvers/test_contract.jl @@ -20,12 +20,14 @@ using ITensors: prime, replaceinds, replaceprime using ITensorMPS: ITensorMPS using LinearAlgebra: norm, normalize using NamedGraphs.NamedGraphGenerators: named_comb_tree +using StableRNGs: StableRNG using Test: @test, @test_broken, @testset @testset "Contract MPO" begin N = 20 s = siteinds("S=1/2", N) - psi = random_mps(s; link_space=8) + rng = StableRNG(1234) + psi = random_mps(rng, s; link_space=8) os = OpSum() for j in 1:(N - 1) os += 0.5, "S+", j, "S-", j + 1 @@ -88,7 +90,8 @@ using Test: @test, @test_broken, @testset @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-5 # Test with nsite=1 - Hpsi_guess = random_mps(t; link_space=32) + rng = StableRNG(1234) + Hpsi_guess = random_mps(rng, t; link_space=32) Hpsi = contract(H, psi; alg="fit", init=Hpsi_guess, nsites=1, nsweeps=4) @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-4 end @@ -99,7 +102,8 @@ end c = named_comb_tree(tooth_lengths) s = siteinds("S=1/2", c) - psi = normalize(random_ttn(s; link_space=8)) + rng = StableRNG(1234) + psi = normalize(random_ttn(rng, s; link_space=8)) os = ModelHamiltonians.heisenberg(c; J1=1, J2=1) H = ttn(os, s) @@ -141,7 +145,8 @@ end @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-5 # Test with nsite=1 - Hpsi_guess = random_ttn(t; link_space=32) + rng = StableRNG(1234) + Hpsi_guess = random_ttn(rng, t; link_space=32) Hpsi = contract(H, psi; alg="fit", nsites=1, nsweeps=10, init=Hpsi_guess) @test inner(psit, Hpsi) ≈ inner(psit, H, psi) rtol = 1e-2 end diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg.jl b/test/test_treetensornetworks/test_solvers/test_dmrg.jl index 2f638e90..b352d43c 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg.jl @@ -21,6 +21,7 @@ using ITensors: ITensors using KrylovKit: eigsolve using NamedGraphs.NamedGraphGenerators: named_comb_tree using Observers: observer +using StableRNGs: StableRNG using Test: @test, @test_broken, @testset # This is needed since `eigen` is broken @@ -43,7 +44,8 @@ ITensors.disable_auto_fermion() H = mpo(os, s) - psi = random_mps(s; link_space=20) + rng = StableRNG(1234) + psi = random_mps(rng, s; link_space=20) nsweeps = 10 maxdim = [10, 20, 40, 100] @@ -87,7 +89,8 @@ end os += "Sz", j, "Sz", j + 1 end H = mpo(os, s) - psi = random_mps(s; link_space=20) + rng = StableRNG(1234) + psi = random_mps(rng, s; link_space=20) nsweeps = 4 maxdim = [20, 40, 80, 80] @@ -124,7 +127,8 @@ end os += 
"Sz", j, "Sz", j + 1 end H = mpo(os, s) - psi = random_mps(s; link_space=10) + rng = StableRNG(1234) + psi = random_mps(rng, s; link_space=10) nsweeps = 4 maxdim = [10, 20, 40, 80] @@ -156,7 +160,8 @@ end H = mpo(os, s) - psi = random_mps(s; link_space=20) + rng = StableRNG(1234) + psi = random_mps(rng, s; link_space=20) # Choose nsweeps to be less than length of arrays nsweeps = 5 @@ -193,7 +198,8 @@ end states = v -> d[v] psi = ttn(states, s) - # psi = random_ttn(s; link_space=20) #FIXME: random_ttn broken for QN conserving case + # rng = StableRNG(1234) + # psi = random_ttn(rng, s; link_space=20) #FIXME: random_ttn broken for QN conserving case nsweeps = 10 maxdim = [10, 20, 40, 100] @@ -207,7 +213,8 @@ end vmap = Dictionary(collect(vertices(s))[linear_order], 1:length(linear_order)) sline = only.(collect(vertex_data(s)))[linear_order] Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], os), sline) - psiline = ITensorMPS.random_mps(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) + rng = StableRNG(1234) + psiline = ITensorMPS.random_mps(rng, sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) e2, psi2 = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) @test inner(psi', H, psi) ≈ inner(psi2', Hline, psi2) atol = 1e-5 @@ -242,7 +249,8 @@ end # get MPS / MPO with JW string result ITensors.disable_auto_fermion() Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], os), sline) - psiline = ITensorMPS.random_mps(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) + rng = StableRNG(1234) + psiline = ITensorMPS.random_mps(rng, sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) e_jw, psi_jw = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) ITensors.enable_auto_fermion() @@ -261,7 +269,8 @@ end # Compare to `ITensors.MPO` version of `dmrg` Hline = ITensorMPS.MPO(replace_vertices(v -> vmap[v], os), sline) - psiline = ITensorMPS.random_mps(sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) + rng = StableRNG(1234) + psiline = ITensorMPS.random_mps(rng, sline, i -> isodd(i) ? "Up" : "Dn"; linkdims=20) e2, psi2 = dmrg(Hline, psiline; nsweeps, maxdim, cutoff, outputlevel=0) @test inner(psi', H, psi) ≈ inner(psi2', Hline, psi2) atol = 1e-5 @@ -282,7 +291,8 @@ end s = siteinds("S=1/2", c) os = ModelHamiltonians.heisenberg(c) H = ttn(os, s) - psi = random_ttn(s; link_space=5) + rng = StableRNG(1234) + psi = random_ttn(rng, s; link_space=5) e, psi = dmrg(H, psi; nsweeps, maxdim, nsites) @test all(edge_data(linkdims(psi)) .<= maxdim) diff --git a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl index 73c4d805..f718ba7d 100644 --- a/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl +++ b/test/test_treetensornetworks/test_solvers/test_dmrg_x.jl @@ -8,73 +8,55 @@ using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using ITensors: @disable_warn_order, array, dag, onehot, uniqueind using LinearAlgebra: eigen, normalize using NamedGraphs.NamedGraphGenerators: named_comb_tree -using Random: Random +using StableRNGs: StableRNG using Test: @test, @testset - # TODO: Combine MPS and TTN tests. 
@testset "MPS DMRG-X" for conserve_qns in (false, true) n = 10 s = siteinds("S=1/2", n; conserve_qns) - - Random.seed!(123) - W = 12 # Random fields h ∈ [-W, W] - h = W * (2 * rand(n) .- 1) + rng = StableRNG(1234) + h = W * (2 * rand(rng, n) .- 1) H = mpo(ModelHamiltonians.heisenberg(n; h), s) - - ψ = mps(v -> rand(["↑", "↓"]), s) - + ψ = mps(v -> rand(rng, ["↑", "↓"]), s) dmrg_x_kwargs = (nsweeps=20, normalize=true, maxdim=20, cutoff=1e-10, outputlevel=0) - e, ϕ = dmrg_x(H, ψ; nsites=2, dmrg_x_kwargs...) - @test inner(ϕ', H, ϕ) / inner(ϕ, ϕ) ≈ e @test inner(ψ', H, ψ) / inner(ψ, ψ) ≈ inner(ϕ', H, ϕ) / inner(ϕ, ϕ) rtol = 1e-1 @test inner(H, ψ, H, ψ) ≉ inner(ψ', H, ψ)^2 rtol = 1e-7 @test inner(H, ϕ, H, ϕ) ≈ inner(ϕ', H, ϕ)^2 rtol = 1e-7 - e, ϕ̃ = dmrg_x(H, ϕ; nsites=1, dmrg_x_kwargs...) - @test inner(ϕ̃', H, ϕ̃) / inner(ϕ̃, ϕ̃) ≈ e @test inner(ψ', H, ψ) / inner(ψ, ψ) ≈ inner(ϕ̃', H, ϕ̃) / inner(ϕ̃, ϕ̃) rtol = 1e-1 @test inner(H, ϕ̃, H, ϕ̃) ≈ inner(ϕ̃', H, ϕ̃)^2 rtol = 1e-3 # Sometimes broken, sometimes not # @test abs(loginner(ϕ̃, ϕ) / n) ≈ 0.0 atol = 1e-6 end - @testset "Tree DMRG-X" for conserve_qns in (false, true) + # TODO: Combine with tests above into a loop over graph structures. tooth_lengths = fill(2, 3) root_vertex = (3, 2) c = named_comb_tree(tooth_lengths) s = siteinds("S=1/2", c; conserve_qns) - - Random.seed!(12) - W = 12 # Random fields h ∈ [-W, W] - h = Dictionary(vertices(c), W * (2 * rand(nv(c)) .- 1)) - + rng = StableRNG(123) + h = Dictionary(vertices(c), W * (2 * rand(rng, nv(c)) .- 1)) H = ttn(ModelHamiltonians.heisenberg(c; h), s) - ψ = normalize(ttn(v -> rand(["↑", "↓"]), s)) - + ψ = normalize(ttn(v -> rand(rng, ["↑", "↓"]), s)) dmrg_x_kwargs = (nsweeps=20, normalize=true, maxdim=20, cutoff=1e-10, outputlevel=0) - e, ϕ = dmrg_x(H, ψ; nsites=2, dmrg_x_kwargs...) - @test inner(ϕ', H, ϕ) / inner(ϕ, ϕ) ≈ e @test inner(ψ', H, ψ) / inner(ψ, ψ) ≈ inner(ϕ', H, ϕ) / inner(ϕ, ϕ) rtol = 1e-1 @test inner(H, ψ, H, ψ) ≉ inner(ψ', H, ψ)^2 rtol = 1e-2 @test inner(H, ϕ, H, ϕ) ≈ inner(ϕ', H, ϕ)^2 rtol = 1e-7 - e, ϕ̃ = dmrg_x(H, ϕ; nsites=1, dmrg_x_kwargs...) - @test inner(ϕ̃', H, ϕ̃) / inner(ϕ̃, ϕ̃) ≈ e @test inner(ψ', H, ψ) / inner(ψ, ψ) ≈ inner(ϕ̃', H, ϕ̃) / inner(ϕ̃, ϕ̃) rtol = 1e-1 @test inner(H, ϕ̃, H, ϕ̃) ≈ inner(ϕ̃', H, ϕ̃)^2 rtol = 1e-6 # Sometimes broken, sometimes not # @test abs(loginner(ϕ̃, ϕ) / nv(c)) ≈ 0.0 atol = 1e-8 - # compare against ED @disable_warn_order U0 = contract(ψ, root_vertex) @disable_warn_order T = contract(H, root_vertex) diff --git a/test/test_treetensornetworks/test_solvers/test_linsolve.jl b/test/test_treetensornetworks/test_solvers/test_linsolve.jl index 3c62bfc0..dab969ed 100644 --- a/test/test_treetensornetworks/test_solvers/test_linsolve.jl +++ b/test/test_treetensornetworks/test_solvers/test_linsolve.jl @@ -1,7 +1,7 @@ @eval module $(gensym()) using ITensorNetworks: ITensorNetworks, OpSum, apply, dmrg, inner, mpo, random_mps, siteinds using KrylovKit: linsolve -using Random: Random +using StableRNGs: StableRNG using Test: @test, @test_broken, @testset @testset "Linsolve" begin @@ -25,18 +25,18 @@ using Test: @test, @test_broken, @testset # # Test complex case # - Random.seed!(1234) + rng = StableRNG(1234) ## TODO: Need to add support for `random_mps`/`random_tensornetwork` with state input. ## states = [isodd(n) ? 
"Up" : "Dn" for n in 1:N] - ## x_c = random_mps(states, s; link_space=4) + 0.1im * random_mps(states, s; link_space=2) - x_c = random_mps(s; link_space=4) + 0.1im * random_mps(s; link_space=2) + ## x_c = random_mps(rng, states, s; link_space=4) + 0.1im * random_mps(rng, states, s; link_space=2) + x_c = random_mps(rng, s; link_space=4) + 0.1im * random_mps(rng, s; link_space=2) b = apply(H, x_c; alg="fit", nsweeps=3, init=x_c) #cutoff is unsupported kwarg for apply/contract ## TODO: Need to add support for `random_mps`/`random_tensornetwork` with state input. - ## x0 = random_mps(states, s; link_space=10) - x0 = random_mps(s; link_space=10) + ## x0 = random_mps(rng, states, s; link_space=10) + x0 = random_mps(rng, s; link_space=10) x = @test_broken linsolve( H, b, x0; cutoff, maxdim, nsweeps, updater_kwargs=(; tol=1E-6, ishermitian=true) diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp.jl b/test/test_treetensornetworks/test_solvers/test_tdvp.jl index f07251e8..82dc7d38 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp.jl @@ -18,8 +18,8 @@ using ITensorNetworks.ModelHamiltonians: ModelHamiltonians using LinearAlgebra: norm using NamedGraphs.NamedGraphGenerators: named_binary_tree, named_comb_tree using Observers: observer +using StableRNGs: StableRNG using Test: @testset, @test - @testset "MPS TDVP" begin @testset "Basic TDVP" begin N = 10 @@ -35,7 +35,8 @@ using Test: @testset, @test H = mpo(os, s) - ψ0 = random_mps(s; link_space=10) + rng = StableRNG(1234) + ψ0 = random_mps(rng, s; link_space=10) # Time evolve forward: ψ1 = tdvp(H, -0.1im, ψ0; nsweeps=1, cutoff, nsites=1) @@ -96,7 +97,8 @@ using Test: @testset, @test H2 = mpo(os2, s) Hs = [H1, H2] - ψ0 = random_mps(s; link_space=10) + rng = StableRNG(1234) + ψ0 = random_mps(rng, s; link_space=10) ψ1 = tdvp(Hs, -0.1im, ψ0; nsweeps=1, cutoff, nsites=1) @@ -133,7 +135,8 @@ using Test: @testset, @test H = mpo(os, s) - ψ0 = random_mps(s; link_space=10) + rng = StableRNG(1234) + ψ0 = random_mps(rng, s; link_space=10) # Time evolve forward: ψ1 = tdvp(H, -0.1im, ψ0; time_step=-0.05im, order, cutoff, nsites=1) @@ -329,7 +332,8 @@ using Test: @testset, @test H = mpo(os, s) - state = random_mps(s; link_space=2) + rng = StableRNG(1234) + state = random_mps(rng, s; link_space=2) en0 = inner(state', H, state) nsites = [repeat([2], 10); repeat([1], 10)] maxdim = 32 @@ -423,7 +427,8 @@ end H = ttn(os, s) - ψ0 = normalize(random_ttn(s)) + rng = StableRNG(1234) + ψ0 = normalize(random_ttn(rng, s)) # Time evolve forward: ψ1 = tdvp(H, -0.1im, ψ0; root_vertex, nsweeps=1, cutoff, nsites=2) @@ -465,7 +470,8 @@ end H2 = ttn(os2, s) Hs = [H1, H2] - ψ0 = normalize(random_ttn(s; link_space=10)) + rng = StableRNG(1234) + ψ0 = normalize(random_ttn(rng, s; link_space=10)) ψ1 = tdvp(Hs, -0.1im, ψ0; nsweeps=1, cutoff, nsites=1) @@ -633,7 +639,8 @@ end os = ModelHamiltonians.heisenberg(c) H = ttn(os, s) - state = normalize(random_ttn(s; link_space=2)) + rng = StableRNG(1234) + state = normalize(random_ttn(rng, s; link_space=2)) trange = 0.0:tau:ttotal for (step, t) in enumerate(trange) diff --git a/test/test_ttno.jl b/test/test_ttno.jl index 3a5933ef..8d147339 100644 --- a/test/test_ttno.jl +++ b/test/test_ttno.jl @@ -5,26 +5,26 @@ using ITensors: @disable_warn_order, prime, random_itensor using LinearAlgebra: norm using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: shuffle +using StableRNGs: StableRNG using Test: @test, @testset - @testset "TTN operator Basics" 
begin - # random comb tree - tooth_lengths = rand(1:3, rand(2:4)) + rng = StableRNG(1234) + tooth_lengths = rand(rng, 1:3, rand(rng, 2:4)) c = named_comb_tree(tooth_lengths) # specify random site dimension on every site - dmap = v -> rand(1:3) + dmap = v -> rand(rng, 1:3) is = siteinds(dmap, c) # operator site inds is_isp = union_all_inds(is, prime(is; links=[])) # specify random linear vertex ordering of graph vertices - vertex_order = shuffle(collect(vertices(c))) - + vertex_order = shuffle(rng, collect(vertices(c))) @testset "Construct TTN operator from ITensor or Array" begin cutoff = 1e-10 sites_o = [is_isp[v] for v in vertex_order] # create random ITensor with these indices - O = random_itensor(sites_o...) + rng = StableRNG(1234) + O = random_itensor(rng, sites_o...) # dense TTN constructor from IndsNetwork @disable_warn_order o1 = ttn(O, is_isp; cutoff) root_vertex = only(ortho_region(o1)) @@ -33,7 +33,6 @@ using Test: @test, @testset end @test norm(O - O1) < 1e2 * cutoff end - @testset "Ortho" begin # TODO end diff --git a/test/test_ttns.jl b/test/test_ttns.jl index efe1edcc..5eafce71 100644 --- a/test/test_ttns.jl +++ b/test/test_ttns.jl @@ -6,23 +6,24 @@ using ITensors: @disable_warn_order, random_itensor using LinearAlgebra: norm using NamedGraphs.NamedGraphGenerators: named_comb_tree using Random: shuffle +using StableRNGs: StableRNG using Test: @test, @testset - @testset "TTN Basics" begin - # random comb tree - tooth_lengths = rand(1:3, rand(2:4)) + rng = StableRNG(1234) + tooth_lengths = rand(rng, 1:3, rand(rng, 2:4)) c = named_comb_tree(tooth_lengths) # specify random site dimension on every site - dmap = v -> rand(1:3) + dmap = v -> rand(rng, 1:3) is = siteinds(dmap, c) # specify random linear vertex ordering of graph vertices - vertex_order = shuffle(collect(vertices(c))) + vertex_order = shuffle(rng, collect(vertices(c))) @testset "Construct TTN from ITensor or Array" begin cutoff = 1e-10 # create random ITensor with these indices - S = random_itensor(vertex_data(is)...) + rng = StableRNG(1234) + S = random_itensor(rng, vertex_data(is)...) 
# dense TTN constructor from IndsNetwork @disable_warn_order s1 = ttn(S, is; cutoff) root_vertex = only(ortho_region(s1)) From 1bd3e442e81b713816aa3d16320ab648d2dcc407 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Tue, 21 May 2024 13:57:13 -0400 Subject: [PATCH 05/11] Define conj for AbstractITensorNetwork and @preserve_graph macro (#185) * Define conj for AbstractITensorNetwork and @preserve_graph macro * Bump to v0.11.11 --- Project.toml | 4 +- src/abstractitensornetwork.jl | 74 ++++++++++++++++++++++++++--------- test/test_itensornetwork.jl | 11 ++++++ 3 files changed, 69 insertions(+), 20 deletions(-) diff --git a/Project.toml b/Project.toml index 2765a618..e20bc55b 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.10" +version = "0.11.11" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -19,6 +19,7 @@ IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7" IterTools = "c8e1da08-722c-5040-9ed9-7db0dc04731e" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" PackageExtensionCompat = "65ce6f38-6b18-4e1d-a461-8949797d7930" @@ -63,6 +64,7 @@ ITensors = "0.6.8" IsApprox = "0.1" IterTools = "1.4.0" KrylovKit = "0.6, 0.7" +MacroTools = "0.5" NDTensors = "0.3" NamedGraphs = "0.6.0" OMEinsumContractionOrders = "0.8.3" diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl index b7d75327..6f6ee164 100644 --- a/src/abstractitensornetwork.jl +++ b/src/abstractitensornetwork.jl @@ -39,6 +39,7 @@ using ITensors: using ITensorMPS: ITensorMPS, add, linkdim, linkinds, siteinds using .ITensorsExtensions: ITensorsExtensions, indtype, promote_indtype using LinearAlgebra: LinearAlgebra, factorize +using MacroTools: @capture using NamedGraphs: NamedGraphs, NamedGraph, not_implemented using NamedGraphs.GraphsExtensions: ⊔, directed_graph, incident_edges, rename_vertices, vertextype @@ -138,6 +139,30 @@ function setindex_preserve_graph!(tn::AbstractITensorNetwork, value, vertex) return tn end +# TODO: Move to `BaseExtensions` module. +function is_setindex!_expr(expr::Expr) + return is_assignment_expr(expr) && is_getindex_expr(first(expr.args)) +end +is_setindex!_expr(x) = false +is_getindex_expr(expr::Expr) = (expr.head === :ref) +is_getindex_expr(x) = false +is_assignment_expr(expr::Expr) = (expr.head === :(=)) +is_assignment_expr(expr) = false + +# TODO: Define this in terms of a function mapping +# preserve_graph_function(::typeof(setindex!)) = setindex!_preserve_graph +# preserve_graph_function(::typeof(map_vertex_data)) = map_vertex_data_preserve_graph +# Also allow annotating codeblocks like `@views`. +macro preserve_graph(expr) + if !is_setindex!_expr(expr) + error( + "preserve_graph must be used with setindex! syntax (as @preserve_graph a[i,j,...] = value)", + ) + end + @capture(expr, array_[indices__] = value_) + return :(setindex_preserve_graph!($(esc(array)), $(esc(value)), $(esc.(indices)...))) +end + function ITensors.hascommoninds(tn::AbstractITensorNetwork, edge::Pair) return hascommoninds(tn, edgetype(tn)(edge)) end @@ -148,7 +173,7 @@ end function Base.setindex!(tn::AbstractITensorNetwork, value, v) # v = to_vertex(tn, index...) 
- setindex_preserve_graph!(tn, value, v) + @preserve_graph tn[v] = value for edge in incident_edges(tn, v) rem_edge!(tn, edge) end @@ -297,12 +322,12 @@ function ITensors.replaceinds( @assert underlying_graph(is) == underlying_graph(is′) for v in vertices(is) isassigned(is, v) || continue - setindex_preserve_graph!(tn, replaceinds(tn[v], is[v] => is′[v]), v) + @preserve_graph tn[v] = replaceinds(tn[v], is[v] => is′[v]) end for e in edges(is) isassigned(is, e) || continue for v in (src(e), dst(e)) - setindex_preserve_graph!(tn, replaceinds(tn[v], is[e] => is′[e]), v) + @preserve_graph tn[v] = replaceinds(tn[v], is[e] => is′[e]) end end return tn @@ -361,13 +386,31 @@ end LinearAlgebra.adjoint(tn::Union{IndsNetwork,AbstractITensorNetwork}) = prime(tn) -#dag(tn::AbstractITensorNetwork) = map_vertex_data(dag, tn) -function ITensors.dag(tn::AbstractITensorNetwork) - tndag = copy(tn) - for v in vertices(tndag) - setindex_preserve_graph!(tndag, dag(tndag[v]), v) +function map_vertex_data(f, tn::AbstractITensorNetwork) + tn = copy(tn) + for v in vertices(tn) + tn[v] = f(tn[v]) end - return tndag + return tn +end + +# TODO: Define `@preserve_graph map_vertex_data(f, tn)` +function map_vertex_data_preserve_graph(f, tn::AbstractITensorNetwork) + tn = copy(tn) + for v in vertices(tn) + @preserve_graph tn[v] = f(tn[v]) + end + return tn +end + +function Base.conj(tn::AbstractITensorNetwork) + # TODO: Use `@preserve_graph map_vertex_data(f, tn)` + return map_vertex_data_preserve_graph(conj, tn) +end + +function ITensors.dag(tn::AbstractITensorNetwork) + # TODO: Use `@preserve_graph map_vertex_data(f, tn)` + return map_vertex_data_preserve_graph(dag, tn) end # TODO: should this make sure that internal indices @@ -442,9 +485,7 @@ function NDTensors.contract( for n_dst in neighbors_dst add_edge!(tn, merged_vertex => n_dst) end - - setindex_preserve_graph!(tn, new_itensor, merged_vertex) - + @preserve_graph tn[merged_vertex] = new_itensor return tn end @@ -533,13 +574,8 @@ function LinearAlgebra.factorize( add_edge!(tn, X_vertex => nX) end add_edge!(tn, Y_vertex => dst(edge)) - - # tn[X_vertex] = X - setindex_preserve_graph!(tn, X, X_vertex) - - # tn[Y_vertex] = Y - setindex_preserve_graph!(tn, Y, Y_vertex) - + @preserve_graph tn[X_vertex] = X + @preserve_graph tn[Y_vertex] = Y return tn end diff --git a/test/test_itensornetwork.jl b/test/test_itensornetwork.jl index 7e97c6a3..ba3caa01 100644 --- a/test/test_itensornetwork.jl +++ b/test/test_itensornetwork.jl @@ -175,6 +175,17 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) return inds -> itensor(randn(rng, elt, dim.(inds)...), inds) end @test eltype(ψ[first(vertices(ψ))]) == elt + + ψc = conj(ψ) + for v in vertices(ψ) + @test ψc[v] == conj(ψ[v]) + end + + ψd = dag(ψ) + for v in vertices(ψ) + @test ψd[v] == dag(ψ[v]) + end + rng = StableRNG(1234) ψ = ITensorNetwork(g; kwargs...) 
do v return inds -> itensor(randn(rng, dim.(inds)...), inds) From 4efceb86dee7b15bcd37f2bb5b171180c305deb3 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Wed, 22 May 2024 14:22:34 -0400 Subject: [PATCH 06/11] Generalize siteinds constructor (#186) * Generalize siteinds constructor * Bump to v0.11.12 --- Project.toml | 2 +- src/sitetype.jl | 25 ++++++++++++++++--------- test/test_sitetype.jl | 12 +++++++++++- 3 files changed, 28 insertions(+), 11 deletions(-) diff --git a/Project.toml b/Project.toml index e20bc55b..a2cd740d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.11" +version = "0.11.12" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" diff --git a/src/sitetype.jl b/src/sitetype.jl index 407fa2e4..570f711c 100644 --- a/src/sitetype.jl +++ b/src/sitetype.jl @@ -11,18 +11,25 @@ function ITensors.siteind(d::Integer, v; addtags="", kwargs...) return ITensors.addtags(Index(d; tags="Site, $addtags", kwargs...), vertex_tag(v)) end -function ITensors.siteinds(sitetypes::AbstractDictionary, g::AbstractGraph; kwargs...) - is = IndsNetwork(g) - for v in vertices(g) - is[v] = [siteind(sitetypes[v], vertex_tag(v); kwargs...)] - end - return is +to_siteinds_callable(x) = Returns(x) +function to_siteinds_callable(x::AbstractDictionary) + return Base.Fix1(getindex, x) ∘ keytype(x) +end + +function ITensors.siteinds(x, g::AbstractGraph; kwargs...) + return siteinds(to_siteinds_callable(x), g; kwargs...) end -function ITensors.siteinds(sitetype, g::AbstractGraph; kwargs...) - return siteinds(Dictionary(vertices(g), fill(sitetype, nv(g))), g; kwargs...) +function to_siteind(x, vertex; kwargs...) + return [siteind(x, vertex_tag(vertex); kwargs...)] end +to_siteind(x::Index, vertex; kwargs...) = [x] + function ITensors.siteinds(f::Function, g::AbstractGraph; kwargs...) - return siteinds(Dictionary(vertices(g), map(v -> f(v), vertices(g))), g; kwargs...) + is = IndsNetwork(g) + for v in vertices(g) + is[v] = to_siteind(f(v), v; kwargs...) 
+ end + return is end diff --git a/test/test_sitetype.jl b/test/test_sitetype.jl index 77075d8d..33283e91 100644 --- a/test/test_sitetype.jl +++ b/test/test_sitetype.jl @@ -3,7 +3,7 @@ using DataGraphs: vertex_data using Dictionaries: Dictionary using Graphs: nv, vertices using ITensorNetworks: IndsNetwork, siteinds -using ITensors: SiteType, hastags, space +using ITensors: Index, SiteType, hastags, space using ITensors.NDTensors: dim using NamedGraphs.NamedGraphGenerators: named_grid using Test: @test, @testset @@ -18,6 +18,16 @@ using Test: @test, @testset fdim(v::Tuple) = space(SiteType(ftype(v))) testtag = "TestTag" + d1 = map(v -> Index(2), vertices(g)) + d2 = map(v -> "S=1/2", vertices(g)) + for x in (v -> d1[v], d1, v -> d2[v], d2) + s = siteinds(x, g) + @test s[1, 1] isa Vector{<:Index} + @test s[1, 2] isa Vector{<:Index} + @test s[2, 1] isa Vector{<:Index} + @test s[2, 2] isa Vector{<:Index} + end + # uniform string sitetype s_us = siteinds(sitetypes[1], g; addtags=testtag) @test s_us isa IndsNetwork From b4659b93f643c0c0524061940e50746fd7abf486 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Thu, 30 May 2024 13:32:14 -0400 Subject: [PATCH 07/11] Add support for Adapt which enables converting tensor networks to GPU (#187) * Add support for Adapt which enables converting tensor networks to GPU * Bump to v0.11.13 --- Project.toml | 6 ++++- .../ITensorNetworksAdaptExt.jl | 14 +++++++++++ test/Project.toml | 1 + test/test_ext/Project.toml | 5 ++++ test/test_ext/test_itensornetworksadaptext.jl | 23 +++++++++++++++++++ 5 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 ext/ITensorNetworksAdaptExt/ITensorNetworksAdaptExt.jl create mode 100644 test/test_ext/Project.toml create mode 100644 test/test_ext/test_itensornetworksadaptext.jl diff --git a/Project.toml b/Project.toml index a2cd740d..877465cc 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.12" +version = "0.11.13" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -36,12 +36,14 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6" [weakdeps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5" GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889" OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" [extensions] +ITensorNetworksAdaptExt = "Adapt" ITensorNetworksEinExprsExt = "EinExprs" ITensorNetworksGraphsFlowsExt = "GraphsFlows" ITensorNetworksOMEinsumContractionOrdersExt = "OMEinsumContractionOrders" @@ -49,6 +51,7 @@ ITensorNetworksObserversExt = "Observers" [compat] AbstractTrees = "0.4.4" +Adapt = "4" Combinatorics = "1" Compat = "3, 4" DataGraphs = "0.2.3" @@ -82,6 +85,7 @@ TupleTools = "1.4" julia = "1.10" [extras] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5" GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889" OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" diff --git a/ext/ITensorNetworksAdaptExt/ITensorNetworksAdaptExt.jl b/ext/ITensorNetworksAdaptExt/ITensorNetworksAdaptExt.jl new file mode 100644 index 00000000..0051da93 --- /dev/null +++ b/ext/ITensorNetworksAdaptExt/ITensorNetworksAdaptExt.jl @@ -0,0 +1,14 @@ +module ITensorNetworksAdaptExt +using Adapt: Adapt, adapt +using ITensorNetworks: 
AbstractITensorNetwork, map_vertex_data_preserve_graph +function Adapt.adapt_structure(to, tn::AbstractITensorNetwork) + # TODO: Define and use: + # + # @preserve_graph map_vertex_data(adapt(to), tn) + # + # or just: + # + # @preserve_graph map(adapt(to), tn) + return map_vertex_data_preserve_graph(adapt(to), tn) +end +end diff --git a/test/Project.toml b/test/Project.toml index 3dd73b41..70eb14d3 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,5 +1,6 @@ [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" DataGraphs = "b5a273c3-7e6c-41f6-98bd-8d7f1525a36a" Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" diff --git a/test/test_ext/Project.toml b/test/test_ext/Project.toml new file mode 100644 index 00000000..4d124728 --- /dev/null +++ b/test/test_ext/Project.toml @@ -0,0 +1,5 @@ +[deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +ITensorNetworks = "2919e153-833c-4bdc-8836-1ea460a35fc7" +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" diff --git a/test/test_ext/test_itensornetworksadaptext.jl b/test/test_ext/test_itensornetworksadaptext.jl new file mode 100644 index 00000000..0578511a --- /dev/null +++ b/test/test_ext/test_itensornetworksadaptext.jl @@ -0,0 +1,23 @@ +@eval module $(gensym()) +using Adapt: Adapt, adapt +using NamedGraphs.NamedGraphGenerators: named_grid +using ITensorNetworks: random_tensornetwork, siteinds +using ITensors: ITensors +using Test: @test, @testset + +struct SinglePrecisionAdaptor end +single_precision(::Type{<:AbstractFloat}) = Float32 +single_precision(type::Type{<:Complex}) = complex(single_precision(real(type))) +Adapt.adapt_storage(::SinglePrecisionAdaptor, x) = single_precision(eltype(x)).(x) + +@testset "Test ITensorNetworksAdaptExt (eltype=$elt)" for elt in ( + Float32, Float64, Complex{Float32}, Complex{Float64} +) + g = named_grid((2, 2)) + s = siteinds("S=1/2", g) + tn = random_tensornetwork(elt, s) + @test ITensors.scalartype(tn) === elt + tn′ = adapt(SinglePrecisionAdaptor(), tn) + @test ITensors.scalartype(tn′) === single_precision(elt) +end +end From a9501bc6698bdd748fb9be024cc7e92de097437d Mon Sep 17 00:00:00 2001 From: Miles Date: Tue, 11 Jun 2024 21:31:10 -0400 Subject: [PATCH 08/11] copy the state at the beginning of default_inserter (#190) --- src/solvers/insert/insert.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/solvers/insert/insert.jl b/src/solvers/insert/insert.jl index a0250c95..11aed223 100644 --- a/src/solvers/insert/insert.jl +++ b/src/solvers/insert/insert.jl @@ -14,6 +14,7 @@ function default_inserter( cutoff=nothing, internal_kwargs, ) + state = copy(state) spec = nothing other_vertex = setdiff(support(region), [ortho_vert]) if !isempty(other_vertex) @@ -37,10 +38,10 @@ function default_inserter( phi::ITensor, region::NamedEdge, ortho; - normalize=false, + cutoff=nothing, maxdim=nothing, mindim=nothing, - cutoff=nothing, + normalize=false, internal_kwargs, ) v = only(setdiff(support(region), [ortho])) From bb5033370ddb86a669625c6fbc95bfca42411947 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Tue, 11 Jun 2024 21:31:31 -0400 Subject: [PATCH 09/11] Bump to v0.11.14 [no ci] --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 877465cc..bc8e3e0d 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = 
"2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman , Joseph Tindall and contributors"] -version = "0.11.13" +version = "0.11.14" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" From dec464f388abfb81cbb400f4454d164fede3352b Mon Sep 17 00:00:00 2001 From: Joseph Tindall <51231103+JoeyT1994@users.noreply.github.com> Date: Tue, 25 Jun 2024 16:59:56 -0400 Subject: [PATCH 10/11] Fix BP convergence metric for complex networks (#193) --- src/caches/beliefpropagationcache.jl | 21 +++---- test/test_belief_propagation.jl | 87 +++++++++++++++------------- test/test_gauging.jl | 6 +- 3 files changed, 60 insertions(+), 54 deletions(-) diff --git a/src/caches/beliefpropagationcache.jl b/src/caches/beliefpropagationcache.jl index b980a52f..3da4ca67 100644 --- a/src/caches/beliefpropagationcache.jl +++ b/src/caches/beliefpropagationcache.jl @@ -1,6 +1,6 @@ using Graphs: IsDirected using SplitApplyCombine: group -using LinearAlgebra: diag +using LinearAlgebra: diag, dot using ITensors: dir using ITensorMPS: ITensorMPS using NamedGraphs.PartitionedGraphs: @@ -12,10 +12,10 @@ using NamedGraphs.PartitionedGraphs: partitionedges, unpartitioned_graph using SimpleTraits: SimpleTraits, Not, @traitfn +using NDTensors: NDTensors -default_message(inds_e) = ITensor[denseblocks(delta(i)) for i in inds_e] +default_message(elt, inds_e) = ITensor[denseblocks(delta(elt, i)) for i in inds_e] default_messages(ptn::PartitionedGraph) = Dictionary() -default_message_norm(m::ITensor) = norm(m) function default_message_update(contract_list::Vector{ITensor}; kwargs...) sequence = optimal_contraction_sequence(contract_list) updated_messages = contract(contract_list; sequence, kwargs...) @@ -33,17 +33,16 @@ default_partitioned_vertices(ψ::AbstractITensorNetwork) = group(v -> v, vertice function default_partitioned_vertices(f::AbstractFormNetwork) return group(v -> original_state_vertex(f, v), vertices(f)) end -default_cache_update_kwargs(cache) = (; maxiter=20, tol=1e-5) +default_cache_update_kwargs(cache) = (; maxiter=25, tol=1e-8) function default_cache_construction_kwargs(alg::Algorithm"bp", ψ::AbstractITensorNetwork) return (; partitioned_vertices=default_partitioned_vertices(ψ)) end -function message_diff( - message_a::Vector{ITensor}, message_b::Vector{ITensor}; message_norm=default_message_norm -) +#TODO: Take `dot` without precontracting the messages to allow scaling to more complex messages +function message_diff(message_a::Vector{ITensor}, message_b::Vector{ITensor}) lhs, rhs = contract(message_a), contract(message_b) - norm_lhs, norm_rhs = message_norm(lhs), message_norm(rhs) - return 0.5 * norm((denseblocks(lhs) / norm_lhs) - (denseblocks(rhs) / norm_rhs)) + f = abs2(dot(lhs / norm(lhs), rhs / norm(rhs))) + return 1 - f end struct BeliefPropagationCache{PTN,MTS,DM} @@ -99,8 +98,10 @@ for f in [ end end +NDTensors.scalartype(bp_cache) = scalartype(tensornetwork(bp_cache)) + function default_message(bp_cache::BeliefPropagationCache, edge::PartitionEdge) - return default_message(bp_cache)(linkinds(bp_cache, edge)) + return default_message(bp_cache)(scalartype(bp_cache), linkinds(bp_cache, edge)) end function message(bp_cache::BeliefPropagationCache, edge::PartitionEdge) diff --git a/test/test_belief_propagation.jl b/test/test_belief_propagation.jl index 728b06d6..a151e4d1 100644 --- a/test/test_belief_propagation.jl +++ b/test/test_belief_propagation.jl @@ -23,7 +23,8 @@ using ITensorNetworks: tensornetwork, update, update_factor, - update_message + update_message, + message_diff using 
 using ITensorNetworks.ModelNetworks: ModelNetworks
 using ITensors.NDTensors: array
 using LinearAlgebra: eigvals, tr
@@ -34,50 +35,56 @@ using NamedGraphs.PartitionedGraphs: PartitionVertex, partitionedges
 using SplitApplyCombine: group
 using StableRNGs: StableRNG
 using Test: @test, @testset
-@testset "belief_propagation" begin
-  ITensors.disable_warn_order()
-  g = named_grid((3, 3))
-  s = siteinds("S=1/2", g)
-  χ = 2
-  rng = StableRNG(1234)
-  ψ = random_tensornetwork(rng, s; link_space=χ)
-  ψψ = ψ ⊗ prime(dag(ψ); sites=[])
-  bpc = BeliefPropagationCache(ψψ)
-  bpc = update(bpc; maxiter=50, tol=1e-10)
-  #Test messages are converged
-  for pe in partitionedges(partitioned_tensornetwork(bpc))
-    @test update_message(bpc, pe) ≈ message(bpc, pe) atol = 1e-8
-  end
-  #Test updating the underlying tensornetwork in the cache
-  v = first(vertices(ψψ))
-  rng = StableRNG(1234)
-  new_tensor = random_itensor(rng, inds(ψψ[v]))
-  bpc_updated = update_factor(bpc, v, new_tensor)
-  ψψ_updated = tensornetwork(bpc_updated)
-  @test ψψ_updated[v] == new_tensor
-  #Test forming a two-site RDM. Check it has the correct size, trace 1 and is PSD
-  vs = [(2, 2), (2, 3)]
+@testset "belief_propagation (eltype=$elt)" for elt in (
+  Float32, Float64, Complex{Float32}, Complex{Float64}
+)
+  begin
+    ITensors.disable_warn_order()
+    g = named_grid((3, 3))
+    s = siteinds("S=1/2", g)
+    χ = 2
+    rng = StableRNG(1234)
+    ψ = random_tensornetwork(rng, elt, s; link_space=χ)
+    ψψ = ψ ⊗ prime(dag(ψ); sites=[])
+    bpc = BeliefPropagationCache(ψψ, group(v -> first(v), vertices(ψψ)))
+    bpc = update(bpc; maxiter=25, tol=eps(real(elt)))
+    #Test messages are converged
+    for pe in partitionedges(partitioned_tensornetwork(bpc))
+      @test message_diff(update_message(bpc, pe), message(bpc, pe)) < 10 * eps(real(elt))
+      @test eltype(only(message(bpc, pe))) == elt
+    end
+    #Test updating the underlying tensornetwork in the cache
+    v = first(vertices(ψψ))
+    rng = StableRNG(1234)
+    new_tensor = random_itensor(rng, inds(ψψ[v]))
+    bpc_updated = update_factor(bpc, v, new_tensor)
+    ψψ_updated = tensornetwork(bpc_updated)
+    @test ψψ_updated[v] == new_tensor
+
+    #Test forming a two-site RDM. Check it has the correct size, trace 1 and is PSD
+    vs = [(2, 2), (2, 3)]
 
-  ψψsplit = split_index(ψψ, NamedEdge.([(v, 1) => (v, 2) for v in vs]))
-  env_tensors = environment(bpc, [(v, 2) for v in vs])
-  rdm = contract(vcat(env_tensors, ITensor[ψψsplit[vp] for vp in [(v, 2) for v in vs]]))
 
-  rdm = array((rdm * combiner(inds(rdm; plev=0)...)) * combiner(inds(rdm; plev=1)...))
-  rdm /= tr(rdm)
 
-  eigs = eigvals(rdm)
-  @test size(rdm) == (2^length(vs), 2^length(vs))
 
-  @test all(eig -> imag(eig) ≈ 0, eigs)
-  @test all(eig -> real(eig) >= -eps(eltype(eig)), eigs)
 
+    ψψsplit = split_index(ψψ, NamedEdge.([(v, 1) => (v, 2) for v in vs]))
+    env_tensors = environment(bpc, [(v, 2) for v in vs])
+    rdm = contract(vcat(env_tensors, ITensor[ψψsplit[vp] for vp in [(v, 2) for v in vs]]))
 
+    rdm = array((rdm * combiner(inds(rdm; plev=0)...)) * combiner(inds(rdm; plev=1)...))
+    rdm /= tr(rdm)
 
+    eigs = eigvals(rdm)
+    @test size(rdm) == (2^length(vs), 2^length(vs))
 
+    @test all(eig -> abs(imag(eig)) <= eps(real(elt)), eigs)
+    @test all(eig -> real(eig) >= -eps(real(elt)), eigs)
 
-  #Test edge case of network which evalutes to 0
-  χ = 2
-  g = named_grid((3, 1))
-  rng = StableRNG(1234)
-  ψ = random_tensornetwork(rng, ComplexF64, g; link_space=χ)
-  ψ[(1, 1)] = 0.0 * ψ[(1, 1)]
-  @test iszero(scalar(ψ; alg="bp"))
+    #Test edge case of network which evaluates to 0
+    χ = 2
+    g = named_grid((3, 1))
+    rng = StableRNG(1234)
+    ψ = random_tensornetwork(rng, elt, g; link_space=χ)
+    ψ[(1, 1)] = 0 * ψ[(1, 1)]
+    @test iszero(scalar(ψ; alg="bp"))
+  end
 end
 end
diff --git a/test/test_gauging.jl b/test/test_gauging.jl
index 9eb4ebbf..67b1b5dd 100644
--- a/test/test_gauging.jl
+++ b/test/test_gauging.jl
@@ -27,9 +27,7 @@ using Test: @test, @testset
   ψ = random_tensornetwork(rng, s; link_space=χ)
 
   # Move directly to vidal gauge
-  ψ_vidal = VidalITensorNetwork(
-    ψ; cache_update_kwargs=(; maxiter=20, tol=1e-12, verbose=true)
-  )
+  ψ_vidal = VidalITensorNetwork(ψ; cache_update_kwargs=(; maxiter=30, verbose=true))
   @test gauge_error(ψ_vidal) < 1e-8
 
   # Move to symmetric gauge
@@ -38,7 +36,7 @@ using Test: @test, @testset
   bp_cache = cache_ref[]
 
   # Test we just did a gauge transform and didn't change the overall network
-  @test inner(ψ_symm, ψ) / sqrt(inner(ψ_symm, ψ_symm) * inner(ψ, ψ)) ≈ 1.0
+  @test inner(ψ_symm, ψ) / sqrt(inner(ψ_symm, ψ_symm) * inner(ψ, ψ)) ≈ 1.0 atol = 1e-8
 
   #Test all message tensors are approximately diagonal even when we keep running BP
   bp_cache = update(bp_cache; maxiter=10)

From 806d89757bbbe1fe140213a41f877cb8489f6645 Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Tue, 25 Jun 2024 17:04:01 -0400
Subject: [PATCH 11/11] Bump to v0.11.15 [no ci]

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index bc8e3e0d..64af22b6 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ITensorNetworks"
 uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7"
 authors = ["Matthew Fishman , Joseph Tindall and contributors"]
-version = "0.11.14"
+version = "0.11.15"
 
 [deps]
 AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
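
The diffs above only show the new APIs piecemeal, so a few hedged usage sketches follow; none are excerpts from the repository. First, patch 05: `conj` and `dag` now act tensor-by-tensor while leaving the graph untouched (they route through `map_vertex_data_preserve_graph`, which the patch's TODOs eventually want to spell `@preserve_graph map_vertex_data(f, tn)`). A minimal sketch, assuming an arbitrary grid size, element type, and bond dimension:

```julia
using Graphs: vertices
using ITensorNetworks: random_tensornetwork, siteinds
using ITensors: dag
using NamedGraphs.NamedGraphGenerators: named_grid

g = named_grid((2, 2))
s = siteinds("S=1/2", g)
ψ = random_tensornetwork(ComplexF64, s; link_space=2)

# Both operations preserve the vertices and edges; only the vertex data changes.
ψc = conj(ψ)
ψd = dag(ψ)
@assert all(v -> ψc[v] == conj(ψ[v]), vertices(ψ))
@assert all(v -> ψd[v] == dag(ψ[v]), vertices(ψ))
```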
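Patch 06 funnels every `siteinds` input through `to_siteinds_callable`, so strings, functions of the vertex, `Dictionaries.jl`-style dictionaries, and prebuilt `Index` objects all build an `IndsNetwork` through one code path. A sketch of two common call forms — the mixed spin assignment is just an illustration:

```julia
using ITensorNetworks: siteinds
using NamedGraphs.NamedGraphGenerators: named_grid

g = named_grid((2, 2))

# Uniform site type on every vertex:
s_uniform = siteinds("S=1/2", g)

# Per-vertex site types via a function of the vertex (vertices here are tuples):
s_mixed = siteinds(v -> isodd(first(v)) ? "S=1/2" : "S=1", g)
```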
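Patch 07's extension defines `Adapt.adapt_structure` to map a storage conversion over every vertex tensor, which is what makes whole-network conversion a one-liner. A sketch patterned on the extension's own test — the `SinglePrecision` adaptor is an illustrative assumption, and the commented `CuArray` lines assume a CUDA.jl setup that is not shown running here:

```julia
using Adapt: Adapt, adapt
using ITensorNetworks: random_tensornetwork, siteinds
using NamedGraphs.NamedGraphGenerators: named_grid

# Toy adaptor: broadcast each vertex tensor's storage to single precision.
struct SinglePrecision end
Adapt.adapt_storage(::SinglePrecision, x) = Float32.(x)

s = siteinds("S=1/2", named_grid((2, 2)))
ψ = random_tensornetwork(Float64, s; link_space=2)
ψ32 = adapt(SinglePrecision(), ψ)

# Hypothetical GPU transfer, same mechanism with a device array type:
# using CUDA: CuArray
# ψ_gpu = adapt(CuArray, ψ)
```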
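Finally, patch 10: the new `message_diff` measures convergence through a fidelity, `1 - |⟨m_a/‖m_a‖, m_b/‖m_b‖⟩|²`, which vanishes exactly when two messages agree up to an overall (possibly complex) rescaling. A minimal standalone sketch of why that matters for complex networks — the index sizes and the rescaling factor below are arbitrary illustrations:

```julia
using ITensors: Index, random_itensor
using LinearAlgebra: dot, norm

i, j = Index(2), Index(2)
m_a = random_itensor(ComplexF64, i, j)
# The "same" message after an iteration that rescaled it by a complex factor,
# as can happen on complex-valued networks.
m_b = (2.0 + 1.0im) * m_a

# Old-style metric: norm difference of normalized messages. The leftover
# complex phase keeps this far from zero even though the messages agree.
norm_diff = 0.5 * norm(m_a / norm(m_a) - m_b / norm(m_b))

# New fidelity-style metric, as in `message_diff`: zero up to roundoff.
fidelity_diff = 1 - abs2(dot(m_a / norm(m_a), m_b / norm(m_b)))

@show norm_diff fidelity_diff  # norm_diff is clearly nonzero; fidelity_diff ≈ 0
```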