diff --git a/examples/treetensornetworks/solvers/01_tdvp.jl b/examples/treetensornetworks/solvers/01_tdvp.jl
deleted file mode 100644
index af8943ca..00000000
--- a/examples/treetensornetworks/solvers/01_tdvp.jl
+++ /dev/null
@@ -1,42 +0,0 @@
-using ITensors
-using ITensorNetworks
-
-n = 10
-s = siteinds("S=1/2", n)
-
-function heisenberg(n)
-  os = OpSum()
-  for j in 1:(n - 1)
-    os += 0.5, "S+", j, "S-", j + 1
-    os += 0.5, "S-", j, "S+", j + 1
-    os += "Sz", j, "Sz", j + 1
-  end
-  return os
-end
-
-H = MPO(heisenberg(n), s)
-ψ = randomMPS(s, "↑"; linkdims=10)
-
-@show inner(ψ', H, ψ) / inner(ψ, ψ)
-
-ϕ = tdvp(
-  H,
-  -1.0,
-  ψ;
-  nsweeps=20,
-  reverse_step=false,
-  normalize=true,
-  maxdim=30,
-  cutoff=1e-10,
-  outputlevel=1,
-)
-
-@show inner(ϕ', H, ϕ) / inner(ϕ, ϕ)
-
-e2, ϕ2 = dmrg(H, ψ; nsweeps=10, maxdim=20, cutoff=1e-10)
-
-@show inner(ϕ2', H, ϕ2) / inner(ϕ2, ϕ2)
-
-ϕ3 = ITensorNetworks.dmrg(H, ψ; nsweeps=10, maxdim=20, cutoff=1e-10, outputlevel=1)
-
-@show inner(ϕ3', H, ϕ3) / inner(ϕ3, ϕ3)
diff --git a/examples/treetensornetworks/solvers/02_dmrg-x.jl b/examples/treetensornetworks/solvers/02_dmrg-x.jl
deleted file mode 100644
index 0a17ad04..00000000
--- a/examples/treetensornetworks/solvers/02_dmrg-x.jl
+++ /dev/null
@@ -1,44 +0,0 @@
-using ITensors
-using ITensorNetworks
-using LinearAlgebra
-
-function heisenberg(n; h=zeros(n))
-  os = OpSum()
-  for j in 1:(n - 1)
-    os += 0.5, "S+", j, "S-", j + 1
-    os += 0.5, "S-", j, "S+", j + 1
-    os += "Sz", j, "Sz", j + 1
-  end
-  for j in 1:n
-    if h[j] ≠ 0
-      os -= h[j], "Sz", j
-    end
-  end
-  return os
-end
-
-n = 10
-s = siteinds("S=1/2", n)
-
-using Random
-Random.seed!(12)
-
-# MBL when W > 3.5-4
-W = 12
-# Random fields h ∈ [-W, W]
-h = W * (2 * rand(n) .- 1)
-H = MPO(heisenberg(n; h), s)
-
-initstate = rand(["↑", "↓"], n)
-ψ = MPS(s, initstate)
-
-dmrg_x_kwargs = (
-  nsweeps=10, reverse_step=false, normalize=true, maxdim=20, cutoff=1e-10, outputlevel=1
-)
-
-ϕ = dmrg_x(H, ψ; dmrg_x_kwargs...)
-
-@show inner(ψ', H, ψ) / inner(ψ, ψ)
-@show inner(H, ψ, H, ψ) - inner(ψ', H, ψ)^2
-@show inner(ϕ', H, ϕ) / inner(ϕ, ϕ)
-@show inner(H, ϕ, H, ϕ) - inner(ϕ', H, ϕ)^2
diff --git a/examples/treetensornetworks/solvers/03_models.jl b/examples/treetensornetworks/solvers/03_models.jl
deleted file mode 100644
index 03a4bc97..00000000
--- a/examples/treetensornetworks/solvers/03_models.jl
+++ /dev/null
@@ -1,20 +0,0 @@
-using ITensors
-
-function heisenberg(n; J=1.0, J2=0.0)
-  ℋ = OpSum()
-  if !iszero(J)
-    for j in 1:(n - 1)
-      ℋ += J / 2, "S+", j, "S-", j + 1
-      ℋ += J / 2, "S-", j, "S+", j + 1
-      ℋ += J, "Sz", j, "Sz", j + 1
-    end
-  end
-  if !iszero(J2)
-    for j in 1:(n - 2)
-      ℋ += J2 / 2, "S+", j, "S-", j + 2
-      ℋ += J2 / 2, "S-", j, "S+", j + 2
-      ℋ += J2, "Sz", j, "Sz", j + 2
-    end
-  end
-  return ℋ
-end
diff --git a/examples/treetensornetworks/solvers/03_tdvp_time_dependent.jl b/examples/treetensornetworks/solvers/03_tdvp_time_dependent.jl
deleted file mode 100644
index 18b571a8..00000000
--- a/examples/treetensornetworks/solvers/03_tdvp_time_dependent.jl
+++ /dev/null
@@ -1,153 +0,0 @@
-using DifferentialEquations
-using ITensors
-using ITensorNetworks
-using KrylovKit
-using LinearAlgebra
-using Random
-
-Random.seed!(1234)
-
-# Define the time-independent model
-include("03_models.jl")
-
-# Define the solvers needed for TDVP
-include("03_solvers.jl")
-
-# Time dependent Hamiltonian is:
-# H(t) = H₁(t) + H₂(t) + …
-#      = f₁(t) H₁(0) + f₂(t) H₂(0) + …
-#      = cos(ω₁t) H₁(0) + cos(ω₂t) H₂(0) + …
-
-# Number of sites
-n = 6
-
-# How much information to output from TDVP
-# Set to 2 to get information about each bond/site
-# evolution, and 3 to get information about the
-# solver.
-outputlevel = 1
-
-# Frequency of time dependent terms
-ω₁ = 0.1
-ω₂ = 0.2
-
-# Nearest and next-nearest neighbor
-# Heisenberg couplings.
-J₁ = 1.0
-J₂ = 1.0
-
-time_step = 0.1
-time_stop = 1.0
-
-# nsite-update TDVP
-nsite = 2
-
-# Starting state bond/link dimension.
-# A product state starting state can
-# cause issues for TDVP without
-# subspace expansion.
-start_linkdim = 4
-
-# TDVP truncation parameters
-maxdim = 100
-cutoff = 1e-8
-
-tol = 1e-15
-
-# ODE solver parameters
-ode_alg = Tsit5()
-ode_kwargs = (; reltol=tol, abstol=tol)
-
-# Krylov solver parameters
-krylov_kwargs = (; tol=tol, eager=true)
-
-@show n
-@show ω₁, ω₂
-@show J₁, J₂
-@show maxdim, cutoff, nsite
-@show start_linkdim
-@show time_step, time_stop
-@show ode_alg
-@show ode_kwargs
-@show krylov_kwargs
-
-ω⃗ = [ω₁, ω₂]
-f⃗ = [t -> cos(ω * t) for ω in ω⃗]
-
-# H₀ = H(0) = H₁(0) + H₂(0) + …
-ℋ₁₀ = heisenberg(n; J=J₁, J2=0.0)
-ℋ₂₀ = heisenberg(n; J=0.0, J2=J₂)
-ℋ⃗₀ = [ℋ₁₀, ℋ₂₀]
-
-s = siteinds("S=1/2", n)
-
-H⃗₀ = [MPO(ℋ₀, s) for ℋ₀ in ℋ⃗₀]
-
-# Initial state, ψ₀ = ψ(0)
-# Initialize as complex since that is what DifferentialEquations.jl
-# expects.
-ψ₀ = complex.(randomMPS(s, j -> isodd(j) ? "↑" : "↓"; linkdims=start_linkdim))
-
-@show norm(ψ₀)
-
-println()
-println("#"^100)
-println("Running TDVP with ODE solver")
-println("#"^100)
-println()
-
-function ode_solver(H⃗₀, time_step, ψ₀; kwargs...)
-  return ode_solver(
-    -im * TimeDependentSum(f⃗, H⃗₀),
-    time_step,
-    ψ₀;
-    solver_alg=ode_alg,
-    ode_kwargs...,
-    kwargs...,
-  )
-end
-
-ψₜ_ode = tdvp(ode_solver, H⃗₀, time_stop, ψ₀; time_step, maxdim, cutoff, nsite, outputlevel)
-
-println()
-println("Finished running TDVP with ODE solver")
-println()
-
-println()
-println("#"^100)
-println("Running TDVP with Krylov solver")
-println("#"^100)
-println()
-
-function krylov_solver(H⃗₀, time_step, ψ₀; kwargs...)
-  return krylov_solver(
-    -im * TimeDependentSum(f⃗, H⃗₀), time_step, ψ₀; krylov_kwargs..., kwargs...
-  )
-end
-
-ψₜ_krylov = tdvp(krylov_solver, H⃗₀, time_stop, ψ₀; time_step, cutoff, nsite, outputlevel)
-
-println()
-println("Finished running TDVP with Krylov solver")
-println()
-
-println()
-println("#"^100)
-println("Running full state evolution with ODE solver")
-println("#"^100)
-println()
-
-@disable_warn_order begin
-  ψₜ_full, _ = ode_solver(prod.(H⃗₀), time_stop, prod(ψ₀); outputlevel)
-end
-
-println()
-println("Finished full state evolution with ODE solver")
-println()
-
-@show norm(ψₜ_ode)
-@show norm(ψₜ_krylov)
-@show norm(ψₜ_full)
-
-@show 1 - abs(inner(prod(ψₜ_ode), ψₜ_full))
-@show 1 - abs(inner(prod(ψₜ_krylov), ψₜ_full))
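Reviewer note: the removed example builds the time-dependent Hamiltonian as a sum of fixed operators with scalar coefficient functions. A minimal, self-contained sketch of that structure using plain matrices (this is not the package's `TimeDependentSum`; all names here are illustrative):

```julia
# Sketch only: H(t) = cos(ω₁ t) H₁(0) + cos(ω₂ t) H₂(0), with matrices
# standing in for the MPOs used in the removed example.
ω⃗ = [0.1, 0.2]
f⃗ = [t -> cos(ω * t) for ω in ω⃗]
H⃗₀ = [[0.0 1.0; 1.0 0.0], [1.0 0.0; 0.0 -1.0]]  # stand-ins for H₁(0), H₂(0)
H(t) = sum(f(t) * H₀ for (f, H₀) in zip(f⃗, H⃗₀))
@show H(0.0)  # at t = 0 this is just H₁(0) + H₂(0)
```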
diff --git a/examples/treetensornetworks/solvers/04_tdvp_observers.jl b/examples/treetensornetworks/solvers/04_tdvp_observers.jl
deleted file mode 100644
index 9cb02c8e..00000000
--- a/examples/treetensornetworks/solvers/04_tdvp_observers.jl
+++ /dev/null
@@ -1,82 +0,0 @@
-using ITensors
-using ITensorNetworks
-using Observers
-
-function heisenberg(N)
-  os = OpSum()
-  for j in 1:(N - 1)
-    os += 0.5, "S+", j, "S-", j + 1
-    os += 0.5, "S-", j, "S+", j + 1
-    os += "Sz", j, "Sz", j + 1
-  end
-  return os
-end
-
-N = 10
-cutoff = 1e-12
-tau = 0.1
-ttotal = 1.0
-
-s = siteinds("S=1/2", N; conserve_qns=true)
-H = MPO(heisenberg(N), s)
-
-function step(; sweep, bond, half_sweep)
-  if bond == 1 && half_sweep == 2
-    return sweep
-  end
-  return nothing
-end
-
-function current_time(; current_time, bond, half_sweep)
-  if bond == 1 && half_sweep == 2
-    return current_time
-  end
-  return nothing
-end
-
-function measure_sz(; psi, bond, half_sweep)
-  if bond == 1 && half_sweep == 2
-    return expect(psi, "Sz"; vertices=[N ÷ 2])
-  end
-  return nothing
-end
-
-function return_state(; psi, bond, half_sweep)
-  if bond == 1 && half_sweep == 2
-    return psi
-  end
-  return nothing
-end
-
-obs = Observer(
-  "steps" => step, "times" => current_time, "psis" => return_state, "Sz" => measure_sz
-)
-
-psi = MPS(s, n -> isodd(n) ? "Up" : "Dn")
-psi_f = tdvp(
-  H,
-  -im * ttotal,
-  psi;
-  time_step=-im * tau,
-  cutoff,
-  outputlevel=1,
-  normalize=false,
-  (observer!)=obs,
-)
-
-res = results(obs)
-steps = res["steps"]
-times = res["times"]
-psis = res["psis"]
-Sz = res["Sz"]
-
-println("\nResults")
-println("=======")
-for n in 1:length(steps)
-  print("step = ", steps[n])
-  print(", time = ", round(times[n]; digits=3))
-  print(", |⟨ψⁿ|ψⁱ⟩| = ", round(abs(inner(psis[n], psi)); digits=3))
-  print(", |⟨ψⁿ|ψᶠ⟩| = ", round(abs(inner(psis[n], psi_f)); digits=3))
-  print(", ⟨Sᶻ⟩ = ", round(Sz[n]; digits=3))
-  println()
-end
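Reviewer note: the deleted script relies on the Observers.jl pattern in which each named measurement function picks keyword arguments out of the solver state, returning `nothing` on calls where nothing should be recorded (here, everywhere except the end of each sweep). A reduced, self-contained sketch of that pattern with toy measurement functions:

```julia
using Observers

# Toy measurement functions; the removed example's versions additionally
# return `nothing` except at the end of each sweep.
iteration(; iter) = iter
squared(; iter) = iter^2
obs = Observer("iter" => iteration, "iter²" => squared)
for iter in 1:3
  update!(obs; iter)
end
@show results(obs)["iter²"]
```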
"Up" : "Dn") -psi = tdvp_nonuniform_timesteps( - ProjMPO(H), psi0; time_steps, cutoff, outputlevel, (step_observer!)=obs -) - -res = results(obs) -times = res["times"] -psis = res["psis"] - -println("\nResults") -println("=======") -print("step = ", 0) -print(", time = ", zero(ComplexF64)) -print(", ⟨Sᶻ⟩ = ", round(expect(psi0, "Sz"; vertices=[N ÷ 2]); digits=3)) -println() -for n in 1:length(times) - print("step = ", n) - print(", time = ", round(times[n]; digits=3)) - print(", ⟨Sᶻ⟩ = ", round(expect(psis[n], "Sz"; vertices=[N ÷ 2]); digits=3)) - println() -end diff --git a/examples/treetensornetworks/solvers/05_utils.jl b/examples/treetensornetworks/solvers/05_utils.jl deleted file mode 100644 index a8c5feeb..00000000 --- a/examples/treetensornetworks/solvers/05_utils.jl +++ /dev/null @@ -1,60 +0,0 @@ -using ITensors -using ITensorNetworks -using Observers -using Printf - -using ITensorNetworks: tdvp_solver, tdvp_step, process_sweeps, TDVPOrder - -function tdvp_nonuniform_timesteps( - solver, - PH, - psi::MPS; - time_steps, - reverse_step=true, - time_start=0.0, - order=2, - (step_observer!)=Observer(), - kwargs..., -) - nsweeps = length(time_steps) - maxdim, mindim, cutoff, noise = process_sweeps(; nsweeps, kwargs...) - tdvp_order = TDVPOrder(order, Base.Forward) - current_time = time_start - for sw in 1:nsweeps - sw_time = @elapsed begin - psi, PH, info = tdvp_step( - tdvp_order, - solver, - PH, - time_steps[sw], - psi; - kwargs..., - current_time, - reverse_step, - sweep=sw, - maxdim=maxdim[sw], - mindim=mindim[sw], - cutoff=cutoff[sw], - noise=noise[sw], - ) - end - current_time += time_steps[sw] - - update!(step_observer!; psi, sweep=sw, outputlevel, current_time) - - if outputlevel ≥ 1 - print("After sweep ", sw, ":") - print(" maxlinkdim=", maxlinkdim(psi)) - @printf(" maxerr=%.2E", info.maxtruncerr) - print(" current_time=", round(current_time; digits=3)) - print(" time=", round(sw_time; digits=3)) - println() - flush(stdout) - end - end - return psi -end - -function tdvp_nonuniform_timesteps(H, psi::MPS; kwargs...) - return tdvp_nonuniform_timesteps(tdvp_solver(; kwargs...), H, psi; kwargs...) -end diff --git a/src/Graphs/abstractdatagraph.jl b/src/Graphs/abstractdatagraph.jl index 6f9f4893..bf75ab18 100644 --- a/src/Graphs/abstractdatagraph.jl +++ b/src/Graphs/abstractdatagraph.jl @@ -1,3 +1,6 @@ +using DataGraphs: DataGraphs, AbstractDataGraph, underlying_graph +using NamedGraphs: AbstractNamedGraph + # TODO: we may want to move these to `DataGraphs.jl` for f in [:_root, :_is_rooted, :_is_rooted_directed_binary_tree] @eval begin @@ -6,3 +9,13 @@ for f in [:_root, :_is_rooted, :_is_rooted_directed_binary_tree] end end end + +DataGraphs.edge_data_type(::AbstractNamedGraph) = Any + +Base.isassigned(::AbstractNamedGraph, ::Any) = false + +function Base.iterate(::AbstractDataGraph) + return error( + "Iterating data graphs is not yet defined. 
diff --git a/src/Graphs/abstractdatagraph.jl b/src/Graphs/abstractdatagraph.jl
index 6f9f4893..bf75ab18 100644
--- a/src/Graphs/abstractdatagraph.jl
+++ b/src/Graphs/abstractdatagraph.jl
@@ -1,3 +1,6 @@
+using DataGraphs: DataGraphs, AbstractDataGraph, underlying_graph
+using NamedGraphs: AbstractNamedGraph
+
 # TODO: we may want to move these to `DataGraphs.jl`
 for f in [:_root, :_is_rooted, :_is_rooted_directed_binary_tree]
   @eval begin
@@ -6,3 +9,13 @@ for f in [:_root, :_is_rooted, :_is_rooted_directed_binary_tree]
     end
   end
 end
+
+DataGraphs.edge_data_type(::AbstractNamedGraph) = Any
+
+Base.isassigned(::AbstractNamedGraph, ::Any) = false
+
+function Base.iterate(::AbstractDataGraph)
+  return error(
+    "Iterating data graphs is not yet defined. We may define it in the future as iterating through the vertex and edge data.",
+  )
+end
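Reviewer note: the `for f in [...] @eval ... end` loop above generates one forwarding method per listed function, delegating to the underlying graph. A self-contained sketch of the same metaprogramming pattern for a toy wrapper type (all names here are illustrative, not part of the package):

```julia
struct Wrapped
  data::Vector{Int}
end
unwrap(w::Wrapped) = w.data

# Generate one forwarding method per listed function, as the loop above
# does for `_root`, `_is_rooted`, etc.
for f in [:length, :first, :last]
  @eval Base.$f(w::Wrapped, args...) = Base.$f(unwrap(w), args...)
end

@show length(Wrapped([1, 2, 3]))  # 3
```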
-include(joinpath("treetensornetworks", "ttn.jl")) -include(joinpath("treetensornetworks", "opsum_to_ttn.jl")) -include(joinpath("treetensornetworks", "projttns", "abstractprojttn.jl")) -include(joinpath("treetensornetworks", "projttns", "projttn.jl")) -include(joinpath("treetensornetworks", "projttns", "projttnsum.jl")) -include(joinpath("treetensornetworks", "projttns", "projouterprodttn.jl")) -include(joinpath("solvers", "solver_utils.jl")) -include(joinpath("solvers", "defaults.jl")) -include(joinpath("solvers", "insert", "insert.jl")) -include(joinpath("solvers", "extract", "extract.jl")) -include(joinpath("solvers", "alternating_update", "alternating_update.jl")) -include(joinpath("solvers", "alternating_update", "region_update.jl")) -include(joinpath("solvers", "tdvp.jl")) -include(joinpath("solvers", "dmrg.jl")) -include(joinpath("solvers", "dmrg_x.jl")) -include(joinpath("solvers", "contract.jl")) -include(joinpath("solvers", "linsolve.jl")) -include(joinpath("solvers", "sweep_plans", "sweep_plans.jl")) +include("ITensorsExt/itensorutils.jl") +include("solvers/local_solvers/eigsolve.jl") +include("solvers/local_solvers/exponentiate.jl") +include("solvers/local_solvers/dmrg_x.jl") +include("solvers/local_solvers/contract.jl") +include("solvers/local_solvers/linsolve.jl") +include("treetensornetworks/abstracttreetensornetwork.jl") +include("treetensornetworks/ttn.jl") +include("treetensornetworks/opsum_to_ttn.jl") +include("treetensornetworks/projttns/abstractprojttn.jl") +include("treetensornetworks/projttns/projttn.jl") +include("treetensornetworks/projttns/projttnsum.jl") +include("treetensornetworks/projttns/projouterprodttn.jl") +include("solvers/solver_utils.jl") +include("solvers/defaults.jl") +include("solvers/insert/insert.jl") +include("solvers/extract/extract.jl") +include("solvers/alternating_update/alternating_update.jl") +include("solvers/alternating_update/region_update.jl") +include("solvers/tdvp.jl") +include("solvers/dmrg.jl") +include("solvers/dmrg_x.jl") +include("solvers/contract.jl") +include("solvers/linsolve.jl") +include("solvers/sweep_plans/sweep_plans.jl") include("apply.jl") include("environment.jl") @@ -137,7 +118,7 @@ include("exports.jl") function __init__() @require_extensions @require OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" include( - joinpath("requires", "omeinsumcontractionorders.jl") + "requires/omeinsumcontractionorders.jl" ) end diff --git a/src/abstractindsnetwork.jl b/src/abstractindsnetwork.jl index fcdeb513..01f78090 100644 --- a/src/abstractindsnetwork.jl +++ b/src/abstractindsnetwork.jl @@ -1,23 +1,32 @@ +using DataGraphs: DataGraphs, AbstractDataGraph, edge_data, edge_data_type, vertex_data +using Graphs: Graphs +using ITensors: ITensors, unioninds, uniqueinds +using NamedGraphs: NamedGraphs, incident_edges, rename_vertices + abstract type AbstractIndsNetwork{V,I} <: AbstractDataGraph{V,Vector{I},Vector{I}} end # Field access data_graph(graph::AbstractIndsNetwork) = not_implemented() # Overload if needed -is_directed(::Type{<:AbstractIndsNetwork}) = false +Graphs.is_directed(::Type{<:AbstractIndsNetwork}) = false # AbstractDataGraphs overloads -vertex_data(graph::AbstractIndsNetwork, args...) = vertex_data(data_graph(graph), args...) -edge_data(graph::AbstractIndsNetwork, args...) = edge_data(data_graph(graph), args...) +function DataGraphs.vertex_data(graph::AbstractIndsNetwork, args...) + return vertex_data(data_graph(graph), args...) +end +function DataGraphs.edge_data(graph::AbstractIndsNetwork, args...) 
diff --git a/src/abstractindsnetwork.jl b/src/abstractindsnetwork.jl
index fcdeb513..01f78090 100644
--- a/src/abstractindsnetwork.jl
+++ b/src/abstractindsnetwork.jl
@@ -1,23 +1,32 @@
+using DataGraphs: DataGraphs, AbstractDataGraph, edge_data, edge_data_type, vertex_data
+using Graphs: Graphs
+using ITensors: ITensors, unioninds, uniqueinds
+using NamedGraphs: NamedGraphs, incident_edges, rename_vertices
+
 abstract type AbstractIndsNetwork{V,I} <: AbstractDataGraph{V,Vector{I},Vector{I}} end
 
 # Field access
 data_graph(graph::AbstractIndsNetwork) = not_implemented()
 
 # Overload if needed
-is_directed(::Type{<:AbstractIndsNetwork}) = false
+Graphs.is_directed(::Type{<:AbstractIndsNetwork}) = false
 
 # AbstractDataGraphs overloads
-vertex_data(graph::AbstractIndsNetwork, args...) = vertex_data(data_graph(graph), args...)
-edge_data(graph::AbstractIndsNetwork, args...) = edge_data(data_graph(graph), args...)
+function DataGraphs.vertex_data(graph::AbstractIndsNetwork, args...)
+  return vertex_data(data_graph(graph), args...)
+end
+function DataGraphs.edge_data(graph::AbstractIndsNetwork, args...)
+  return edge_data(data_graph(graph), args...)
+end
 
 # TODO: Define a generic fallback for `AbstractDataGraph`?
-edge_data_type(::Type{<:AbstractIndsNetwork{V,I}}) where {V,I} = Vector{I}
+DataGraphs.edge_data_type(::Type{<:AbstractIndsNetwork{V,I}}) where {V,I} = Vector{I}
 
 #
 # Index access
 #
 
-function uniqueinds(is::AbstractIndsNetwork, edge::AbstractEdge)
+function ITensors.uniqueinds(is::AbstractIndsNetwork, edge::AbstractEdge)
   inds = IndexSet(get(is, src(edge), Index[]))
   for ei in setdiff(incident_edges(is, src(edge)), [edge])
     inds = unioninds(inds, get(is, ei, Index[]))
@@ -25,15 +34,15 @@ function uniqueinds(is::AbstractIndsNetwork, edge::AbstractEdge)
   return inds
 end
 
-function uniqueinds(is::AbstractIndsNetwork, edge::Pair)
+function ITensors.uniqueinds(is::AbstractIndsNetwork, edge::Pair)
   return uniqueinds(is, edgetype(is)(edge))
 end
 
-function union(tn1::AbstractIndsNetwork, tn2::AbstractIndsNetwork; kwargs...)
+function Base.union(tn1::AbstractIndsNetwork, tn2::AbstractIndsNetwork; kwargs...)
   return IndsNetwork(union(data_graph(tn1), data_graph(tn2); kwargs...))
 end
 
-function rename_vertices(f::Function, tn::AbstractIndsNetwork)
+function NamedGraphs.rename_vertices(f::Function, tn::AbstractIndsNetwork)
  return IndsNetwork(rename_vertices(f, data_graph(tn)))
 end
diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl
index a4d102c5..5a66fcda 100644
--- a/src/abstractitensornetwork.jl
+++ b/src/abstractitensornetwork.jl
@@ -1,3 +1,39 @@
+using DataGraphs:
+  DataGraphs, edge_data, underlying_graph, underlying_graph_type, vertex_data
+using Dictionaries: Dictionary
+using Graphs: Graphs, Graph, add_edge!, dst, edgetype, neighbors, rem_edge!, src, vertices
+using ITensors:
+  ITensors,
+  ITensor,
+  addtags,
+  commoninds,
+  contract,
+  dag,
+  hascommoninds,
+  noprime,
+  prime,
+  replaceprime,
+  setprime,
+  unioninds,
+  uniqueinds,
+  removetags,
+  replacetags,
+  settags,
+  sim,
+  swaptags
+using ITensors.ITensorMPS: ITensorMPS
+using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize
+using ITensors.NDTensors: NDTensors
+using LinearAlgebra: LinearAlgebra
+using NamedGraphs:
+  NamedGraphs,
+  NamedGraph,
+  incident_edges,
+  not_implemented,
+  rename_vertices,
+  vertex_to_parent_vertex,
+  vertextype
+
 abstract type AbstractITensorNetwork{V} <: AbstractDataGraph{V,ITensor,ITensor} end
 
 # Field access
@@ -5,10 +41,10 @@ data_graph_type(::Type{<:AbstractITensorNetwork}) = not_implemented()
 data_graph(graph::AbstractITensorNetwork) = not_implemented()
 
 # TODO: Define a generic fallback for `AbstractDataGraph`?
-edge_data_type(::Type{<:AbstractITensorNetwork}) = ITensor
+DataGraphs.edge_data_type(::Type{<:AbstractITensorNetwork}) = ITensor
 
 # Graphs.jl overloads
-function weights(graph::AbstractITensorNetwork)
+function Graphs.weights(graph::AbstractITensorNetwork)
   V = vertextype(graph)
   es = Tuple.(edges(graph))
   ws = Dictionary{Tuple{V,V},Float64}(es, undef)
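Reviewer note: the hunk above only shows the signature change of `Graphs.weights`. To our understanding, the weight assigned to an edge in this package is log2 of the total dimension shared between the two incident tensors; a rough, self-contained illustration of that convention (an assumption, since the body is not shown in this hunk):

```julia
using ITensors

i, j = Index(4), Index(2)
A = randomITensor(i, j)
B = randomITensor(i, j)
# log2 of the total shared dimension; 4 × 2 = 8 gives weight 3.0.
@show log2(prod(dim.(commoninds(A, B))))
```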
@@ -20,31 +56,33 @@ function weights(graph::AbstractITensorNetwork)
 end
 
 # Copy
-copy(tn::AbstractITensorNetwork) = not_implemented()
+Base.copy(tn::AbstractITensorNetwork) = not_implemented()
 
 # Iteration
-iterate(tn::AbstractITensorNetwork, args...) = iterate(vertex_data(tn), args...)
+Base.iterate(tn::AbstractITensorNetwork, args...) = iterate(vertex_data(tn), args...)
 
 # TODO: This contrasts with the `DataGraphs.AbstractDataGraph` definition,
 # where it is defined as the `vertextype`. Does that cause problems or should it be changed?
-eltype(tn::AbstractITensorNetwork) = eltype(vertex_data(tn))
+Base.eltype(tn::AbstractITensorNetwork) = eltype(vertex_data(tn))
 
 # Overload if needed
-is_directed(::Type{<:AbstractITensorNetwork}) = false
+Graphs.is_directed(::Type{<:AbstractITensorNetwork}) = false
 
 # Derived interface, may need to be overloaded
-function underlying_graph_type(G::Type{<:AbstractITensorNetwork})
+function DataGraphs.underlying_graph_type(G::Type{<:AbstractITensorNetwork})
   return underlying_graph_type(data_graph_type(G))
 end
 
 # AbstractDataGraphs overloads
-function vertex_data(graph::AbstractITensorNetwork, args...)
+function DataGraphs.vertex_data(graph::AbstractITensorNetwork, args...)
   return vertex_data(data_graph(graph), args...)
 end
-edge_data(graph::AbstractITensorNetwork, args...) = edge_data(data_graph(graph), args...)
+function DataGraphs.edge_data(graph::AbstractITensorNetwork, args...)
+  return edge_data(data_graph(graph), args...)
+end
 
-underlying_graph(tn::AbstractITensorNetwork) = underlying_graph(data_graph(tn))
-function vertex_to_parent_vertex(tn::AbstractITensorNetwork, vertex)
+DataGraphs.underlying_graph(tn::AbstractITensorNetwork) = underlying_graph(data_graph(tn))
+function NamedGraphs.vertex_to_parent_vertex(tn::AbstractITensorNetwork, vertex)
   return vertex_to_parent_vertex(underlying_graph(tn), vertex)
 end
@@ -58,7 +96,7 @@ end
 
 # TODO: broadcasting
 
-function union(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; kwargs...)
+function Base.union(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; kwargs...)
   tn = ITensorNetwork(union(data_graph(tn1), data_graph(tn2)); kwargs...)
   # Add any new edges that are introduced during the union
   for v1 in vertices(tn1)
@@ -71,7 +109,7 @@ function union(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; kwargs.
   return tn
 end
 
-function rename_vertices(f::Function, tn::AbstractITensorNetwork)
+function NamedGraphs.rename_vertices(f::Function, tn::AbstractITensorNetwork)
   return ITensorNetwork(rename_vertices(f, data_graph(tn)))
 end
 
@@ -84,15 +122,15 @@ function setindex_preserve_graph!(tn::AbstractITensorNetwork, value, vertex)
   return tn
 end
 
-function hascommoninds(tn::AbstractITensorNetwork, edge::Pair)
+function ITensors.hascommoninds(tn::AbstractITensorNetwork, edge::Pair)
   return hascommoninds(tn, edgetype(tn)(edge))
 end
 
-function hascommoninds(tn::AbstractITensorNetwork, edge::AbstractEdge)
+function ITensors.hascommoninds(tn::AbstractITensorNetwork, edge::AbstractEdge)
   return hascommoninds(tn[src(edge)], tn[dst(edge)])
 end
 
-function setindex!(tn::AbstractITensorNetwork, value, v)
+function Base.setindex!(tn::AbstractITensorNetwork, value, v)
   # v = to_vertex(tn, index...)
   setindex_preserve_graph!(tn, value, v)
   for edge in incident_edges(tn, v)
@@ -110,7 +148,7 @@ function setindex!(tn::AbstractITensorNetwork, value, v)
 end
 
 # Convert to a collection of ITensors (`Vector{ITensor}`).
-function Vector{ITensor}(tn::AbstractITensorNetwork)
+function Base.Vector{ITensor}(tn::AbstractITensorNetwork)
   return [tn[v] for v in vertices(tn)]
 end
@@ -160,11 +198,11 @@ end
 # Conversion to Graphs
 #
 
-function Graph(tn::AbstractITensorNetwork)
+function Graphs.Graph(tn::AbstractITensorNetwork)
   return Graph(Vector{ITensor}(tn))
 end
 
-function NamedGraph(tn::AbstractITensorNetwork)
+function NamedGraphs.NamedGraph(tn::AbstractITensorNetwork)
   return NamedGraph(Vector{ITensor}(tn))
 end
 
@@ -197,7 +235,7 @@ end
 
 # For backwards compatibility
 # TODO: Delete this
-siteinds(tn::AbstractITensorNetwork) = external_indsnetwork(tn)
+ITensorMPS.siteinds(tn::AbstractITensorNetwork) = external_indsnetwork(tn)
 
 # External indsnetwork of the flattened network, with vertices
 # mapped back to `tn1`.
@@ -223,7 +261,7 @@ end
 
 # For backwards compatibility
 # TODO: Delete this
-linkinds(tn::AbstractITensorNetwork) = internal_indsnetwork(tn)
+ITensorMPS.linkinds(tn::AbstractITensorNetwork) = internal_indsnetwork(tn)
 
 #
 # Index access
 #
@@ -233,28 +271,28 @@ function neighbor_itensors(tn::AbstractITensorNetwork, vertex)
   return [tn[vn] for vn in neighbors(tn, vertex)]
 end
 
-function uniqueinds(tn::AbstractITensorNetwork, vertex)
+function ITensors.uniqueinds(tn::AbstractITensorNetwork, vertex)
   return uniqueinds(tn[vertex], neighbor_itensors(tn, vertex)...)
 end
 
-function uniqueinds(tn::AbstractITensorNetwork, edge::AbstractEdge)
+function ITensors.uniqueinds(tn::AbstractITensorNetwork, edge::AbstractEdge)
   return uniqueinds(tn[src(edge)], tn[dst(edge)])
 end
 
-function uniqueinds(tn::AbstractITensorNetwork, edge::Pair)
+function ITensors.uniqueinds(tn::AbstractITensorNetwork, edge::Pair)
   return uniqueinds(tn, edgetype(tn)(edge))
 end
 
-function siteinds(tn::AbstractITensorNetwork, vertex)
+function ITensors.siteinds(tn::AbstractITensorNetwork, vertex)
   return uniqueinds(tn, vertex)
 end
 
-function commoninds(tn::AbstractITensorNetwork, edge)
+function ITensors.commoninds(tn::AbstractITensorNetwork, edge)
   e = edgetype(tn)(edge)
   return commoninds(tn[src(e)], tn[dst(e)])
 end
 
-function linkinds(tn::AbstractITensorNetwork, edge)
+function ITensorMPS.linkinds(tn::AbstractITensorNetwork, edge)
   return commoninds(tn, edge)
 end
 
@@ -267,7 +305,9 @@ function externalinds(tn::AbstractITensorNetwork)
 end
 
 # Priming and tagging (changing Index identifiers)
-function replaceinds(tn::AbstractITensorNetwork, is_is′::Pair{<:IndsNetwork,<:IndsNetwork})
+function ITensors.replaceinds(
+  tn::AbstractITensorNetwork, is_is′::Pair{<:IndsNetwork,<:IndsNetwork}
+)
   tn = copy(tn)
   is, is′ = is_is′
   @assert underlying_graph(is) == underlying_graph(is′)
@@ -311,11 +351,11 @@ const map_inds_label_functions = [
 
 for f in map_inds_label_functions
   @eval begin
-    function $f(n::Union{IndsNetwork,AbstractITensorNetwork}, args...; kwargs...)
+    function ITensors.$f(n::Union{IndsNetwork,AbstractITensorNetwork}, args...; kwargs...)
       return map_inds($f, n, args...; kwargs...)
    end
 
-    function $f(
+    function ITensors.$f(
       ffilter::typeof(linkinds),
       n::Union{IndsNetwork,AbstractITensorNetwork},
       args...;
@@ -324,7 +364,7 @@ for f in map_inds_label_functions
       return map_inds($f, n, args...; sites=[], kwargs...)
     end
 
-    function $f(
+    function ITensors.$f(
      ffilter::typeof(siteinds),
      n::Union{IndsNetwork,AbstractITensorNetwork},
      args...;
@@ -335,10 +375,10 @@ for f in map_inds_label_functions
   end
 end
 
-adjoint(tn::Union{IndsNetwork,AbstractITensorNetwork}) = prime(tn)
+LinearAlgebra.adjoint(tn::Union{IndsNetwork,AbstractITensorNetwork}) = prime(tn)
 
 #dag(tn::AbstractITensorNetwork) = map_vertex_data(dag, tn)
-function dag(tn::AbstractITensorNetwork)
+function ITensors.dag(tn::AbstractITensorNetwork)
   tndag = copy(tn)
   for v in vertices(tndag)
     setindex_preserve_graph!(tndag, dag(tndag[v]), v)
@@ -369,7 +409,7 @@ end
 # TODO: how to define this lazily?
 #norm(tn::AbstractITensorNetwork) = sqrt(inner(tn, tn))
 
-function isapprox(
+function Base.isapprox(
   x::AbstractITensorNetwork,
   y::AbstractITensorNetwork;
   atol::Real=0,
@@ -387,7 +427,7 @@ function isapprox(
   return d <= max(atol, rtol * max(norm(x), norm(y)))
 end
 
-function contract(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
+function ITensors.contract(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
   return contract(tn, edgetype(tn)(edge); kwargs...)
 end
 
@@ -396,7 +436,9 @@ end
 # the vertex `src(edge)`.
 # TODO: write this in terms of a more generic function
 # `Graphs.merge_vertices!` (https://github.com/mtfishman/ITensorNetworks.jl/issues/12)
-function contract(tn::AbstractITensorNetwork, edge::AbstractEdge; merged_vertex=dst(edge))
+function NDTensors.contract(
+  tn::AbstractITensorNetwork, edge::AbstractEdge; merged_vertex=dst(edge)
+)
   V = promote_type(vertextype(tn), typeof(merged_vertex))
   # TODO: Check `ITensorNetwork{V}`, shouldn't need a copy here.
   tn = ITensorNetwork{V}(copy(tn))
@@ -424,16 +466,16 @@ function contract(tn::AbstractITensorNetwork, edge::AbstractEdge; merged_vertex=
   return tn
 end
 
-function tags(tn::AbstractITensorNetwork, edge)
+function ITensors.tags(tn::AbstractITensorNetwork, edge)
   is = linkinds(tn, edge)
   return commontags(is)
 end
 
-function svd(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
+function LinearAlgebra.svd(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
   return svd(tn, edgetype(tn)(edge))
 end
 
-function svd(
+function LinearAlgebra.svd(
   tn::AbstractITensorNetwork,
   edge::AbstractEdge;
   U_vertex=src(edge),
@@ -460,7 +502,7 @@ function svd(
   return tn
 end
 
-function qr(
+function LinearAlgebra.qr(
   tn::AbstractITensorNetwork,
   edge::AbstractEdge;
   Q_vertex=src(edge),
@@ -482,7 +524,7 @@ function qr(
   return tn
 end
 
-function factorize(
+function LinearAlgebra.factorize(
   tn::AbstractITensorNetwork,
   edge::AbstractEdge;
   X_vertex=src(edge),
@@ -519,7 +561,7 @@ function factorize(
   return tn
 end
 
-function factorize(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
+function LinearAlgebra.factorize(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
   return factorize(tn, edgetype(tn)(edge); kwargs...)
 end
 
@@ -538,18 +580,18 @@ function _orthogonalize_edge(tn::AbstractITensorNetwork, edge::AbstractEdge; kwa
   return tn
 end
 
-function orthogonalize(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...)
+function ITensorMPS.orthogonalize(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...)
   return _orthogonalize_edge(tn, edge; kwargs...)
 end
 
-function orthogonalize(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
+function ITensorMPS.orthogonalize(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
   return orthogonalize(tn, edgetype(tn)(edge); kwargs...)
 end
 
 # Orthogonalize an ITensorNetwork towards a source vertex, treating
 # the network as a tree by orthogonalizing along a spanning tree.
 # TODO: Rename `tree_orthogonalize`.
-function orthogonalize(ψ::AbstractITensorNetwork, source_vertex)
+function ITensorMPS.orthogonalize(ψ::AbstractITensorNetwork, source_vertex)
   spanning_tree_edges = post_order_dfs_edges(bfs_tree(ψ, source_vertex), source_vertex)
   for e in spanning_tree_edges
     ψ = orthogonalize(ψ, e)
@@ -568,11 +610,11 @@ function _truncate_edge(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs..
   return tn
 end
 
-function truncate(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...)
+function Base.truncate(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...)
   return _truncate_edge(tn, edge; kwargs...)
 end
 
-function truncate(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
+function Base.truncate(tn::AbstractITensorNetwork, edge::Pair; kwargs...)
   return truncate(tn, edgetype(tn)(edge); kwargs...)
 end
 
@@ -731,7 +773,7 @@ norm_sqr_network(ψ::AbstractITensorNetwork; kwargs...) = inner_network(ψ, ψ;
 # Printing
 #
 
-function show(io::IO, mime::MIME"text/plain", graph::AbstractITensorNetwork)
+function Base.show(io::IO, mime::MIME"text/plain", graph::AbstractITensorNetwork)
   println(io, "$(typeof(graph)) with $(nv(graph)) vertices:")
   show(io, mime, vertices(graph))
   println(io, "\n")
@@ -746,9 +788,9 @@ function show(io::IO, mime::MIME"text/plain", graph::AbstractITensorNetwork)
   return nothing
 end
 
-show(io::IO, graph::AbstractITensorNetwork) = show(io, MIME"text/plain"(), graph)
+Base.show(io::IO, graph::AbstractITensorNetwork) = show(io, MIME"text/plain"(), graph)
 
-function visualize(
+function ITensorVisualizationCore.visualize(
   tn::AbstractITensorNetwork,
   args...;
   vertex_labels_prefix=nothing,
@@ -765,7 +807,7 @@ end
 # Link dimensions
 #
 
-function maxlinkdim(tn::AbstractITensorNetwork)
+function ITensors.maxlinkdim(tn::AbstractITensorNetwork)
   md = 1
   for e in edges(tn)
     md = max(md, linkdim(tn, e))
@@ -773,16 +815,16 @@ function maxlinkdim(tn::AbstractITensorNetwork)
   return md
 end
 
-function linkdim(tn::AbstractITensorNetwork, edge::Pair)
+function ITensorMPS.linkdim(tn::AbstractITensorNetwork, edge::Pair)
   return linkdim(tn, edgetype(tn)(edge))
 end
 
-function linkdim(tn::AbstractITensorNetwork{V}, edge::AbstractEdge{V}) where {V}
+function ITensorMPS.linkdim(tn::AbstractITensorNetwork{V}, edge::AbstractEdge{V}) where {V}
   ls = linkinds(tn, edge)
   return prod([isnothing(l) ? 1 : dim(l) for l in ls])
 end
 
-function linkdims(tn::AbstractITensorNetwork{V}) where {V}
+function ITensorMPS.linkdims(tn::AbstractITensorNetwork{V}) where {V}
   ld = DataGraph{V,Any,Int}(copy(underlying_graph(tn)))
   for e in edges(ld)
     ld[e] = linkdim(tn, e)
@@ -790,29 +832,6 @@ function linkdims(tn::AbstractITensorNetwork{V}) where {V}
   return ld
 end
 
-#
-# Common index checking
-#
-
-function hascommoninds(
-  ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V}
-) where {V}
-  for v in vertices(A)
-    !hascommoninds(siteinds(A, v), siteinds(B, v)) && return false
-  end
-  return true
-end
-
-function hassameinds(
-  ::typeof(siteinds), A::AbstractITensorNetwork{V}, B::AbstractITensorNetwork{V}
-) where {V}
-  nv(A) ≠ nv(B) && return false
-  for v in vertices(A)
-    !ITensors.hassameinds(siteinds(A, v), siteinds(B, v)) && return false
-  end
-  return true
-end
-
 #
 # Site combiners
 #
@@ -863,7 +882,7 @@ is_multi_edge(tn::AbstractITensorNetwork, e) = length(linkinds(tn, e)) > 1
 is_multi_edge(tn::AbstractITensorNetwork) = Base.Fix1(is_multi_edge, tn)
 
 """Add two itensornetworks together by growing the bond dimension.
The network structures need to have the same vertex names and the same site indices on each vertex."""
-function add(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork)
+function ITensorMPS.add(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork)
   @assert issetequal(vertices(tn1), vertices(tn2))
 
   tn1 = combine_linkinds(tn1; edges=filter(is_multi_edge(tn1), edges(tn1)))
@@ -918,28 +937,4 @@ function add(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork)
   return tn12
 end
 
-+(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork) = add(tn1, tn2)
-
-## # TODO: should this make sure that internal indices
-## # don't clash?
-## function hvncat(
-##   dim::Int, tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork; new_dim_names=(1, 2)
-## )
-##   dg = hvncat(dim, data_graph(tn1), data_graph(tn2); new_dim_names)
-##
-##   # Add in missing edges that may be shared
-##   # across `tn1` and `tn2`.
-##   vertices1 = vertices(dg)[1:nv(tn1)]
-##   vertices2 = vertices(dg)[(nv(tn1) + 1):end]
-##   for v1 in vertices1, v2 in vertices2
-##     if hascommoninds(dg[v1], dg[v2])
-##       add_edge!(dg, v1 => v2)
-##     end
-##   end
-##
-##   # TODO: Allow customization of the output type.
-##   ## return promote_type(typeof(tn1), typeof(tn2))(dg)
-##   ## return contract_output(typeof(tn1), typeof(tn2))(dg)
-##
-##   return ITensorNetwork(dg)
-## end
+Base.:+(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork) = add(tn1, tn2)
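Reviewer note: the `add` docstring above says networks are added "by growing the bond dimension". A toy array-level analogue of that direct-sum structure (illustrative only, not the package's implementation):

```julia
# Two MPS-like site tensors with layout (site, left bond, right bond):
A1 = rand(2, 3, 3)
A2 = rand(2, 4, 4)
# Their sum direct-sums the bond spaces for each fixed physical index,
# so the bond dimensions add: 3 + 4 = 7.
A12 = zeros(2, 7, 7)
A12[:, 1:3, 1:3] = A1
A12[:, 4:7, 4:7] = A2
@show size(A12)
```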
diff --git a/src/apply.jl b/src/apply.jl
index 89edf656..163c7454 100644
--- a/src/apply.jl
+++ b/src/apply.jl
@@ -1,3 +1,27 @@
+using ITensors:
+  ITensors,
+  Index,
+  ITensor,
+  apply,
+  commonind,
+  commoninds,
+  contract,
+  dag,
+  denseblocks,
+  hasqns,
+  isdiag,
+  noprime,
+  prime,
+  replaceind,
+  replaceinds,
+  unioninds,
+  uniqueinds
+using ITensors.ContractionSequenceOptimization: optimal_contraction_sequence
+using ITensors.ITensorMPS: siteinds
+using LinearAlgebra: eigen, norm, svd
+using NamedGraphs: NamedEdge
+using Observers: Observers
+
 function sqrt_and_inv_sqrt(
   A::ITensor; ishermitian=false, cutoff=nothing, regularization=nothing
 )
@@ -23,7 +47,9 @@ function symmetric_factorize(
   A::ITensor, inds...; (observer!)=nothing, tags="", svd_kwargs...
 )
   if !isnothing(observer!)
-    insert_function!(observer!, "singular_values" => (; singular_values) -> singular_values)
+    Observers.insert_function!(
+      observer!, "singular_values" => (; singular_values) -> singular_values
+    )
   end
   U, S, V = svd(A, inds...; lefttags=tags, righttags=tags, svd_kwargs...)
   u = commonind(S, U)
@@ -44,7 +70,7 @@ function symmetric_factorize(
     Fu = replaceinds(Fu, v => u)
     S = replaceinds(S, v => u')
   end
-  update!(observer!; singular_values=S)
+  Observers.update!(observer!; singular_values=S)
   return Fu, Fv
 end
@@ -359,7 +385,7 @@ function ITensors.apply(o, ψ::VidalITensorNetwork; normalize=false, apply_kwarg
     return VidalITensorNetwork(updated_ψ, updated_bond_tensors)
   else
-    updated_ψ = ITensors.apply(o, updated_ψ; normalize)
+    updated_ψ = apply(o, updated_ψ; normalize)
     return VidalITensorNetwork(ψ, updated_bond_tensors)
   end
 end
@@ -389,9 +415,7 @@ function fidelity(
     ],
     envs,
   )
-  term1 = ITensors.contract(
-    term1_tns; sequence=ITensors.optimal_contraction_sequence(term1_tns)
-  )
+  term1 = ITensors.contract(term1_tns; sequence=optimal_contraction_sequence(term1_tns))
 
   term2_tns = vcat(
     [
@@ -402,13 +426,9 @@ function fidelity(
     ],
     envs,
   )
-  term2 = ITensors.contract(
-    term2_tns; sequence=ITensors.optimal_contraction_sequence(term2_tns)
-  )
+  term2 = ITensors.contract(term2_tns; sequence=optimal_contraction_sequence(term2_tns))
 
   term3_tns = vcat([p_prev, q_prev, prime(dag(p_cur)), prime(dag(q_cur)), gate], envs)
-  term3 = ITensors.contract(
-    term3_tns; sequence=ITensors.optimal_contraction_sequence(term3_tns)
-  )
+  term3 = ITensors.contract(term3_tns; sequence=optimal_contraction_sequence(term3_tns))
 
   f = term3[] / sqrt(term1[] * term2[])
   return f * conj(f)
@@ -435,16 +455,14 @@ function optimise_p_q(
   qs_ind = setdiff(inds(q_cur), collect(Iterators.flatten(inds.(vcat(envs, p_cur)))))
   ps_ind = setdiff(inds(p_cur), collect(Iterators.flatten(inds.(vcat(envs, q_cur)))))
 
-  opt_b_seq = ITensors.optimal_contraction_sequence(
-    vcat(ITensor[p, q, o, dag(prime(q_cur))], envs)
-  )
-  opt_b_tilde_seq = ITensors.optimal_contraction_sequence(
+  opt_b_seq = optimal_contraction_sequence(vcat(ITensor[p, q, o, dag(prime(q_cur))], envs))
+  opt_b_tilde_seq = optimal_contraction_sequence(
     vcat(ITensor[p, q, o, dag(prime(p_cur))], envs)
   )
-  opt_M_seq = ITensors.optimal_contraction_sequence(
+  opt_M_seq = optimal_contraction_sequence(
     vcat(ITensor[q_cur, replaceinds(prime(dag(q_cur)), prime(qs_ind), qs_ind), p_cur], envs)
   )
-  opt_M_tilde_seq = ITensors.optimal_contraction_sequence(
+  opt_M_tilde_seq = optimal_contraction_sequence(
    vcat(ITensor[p_cur, replaceinds(prime(dag(p_cur)), prime(ps_ind), ps_ind), q_cur], envs)
   )
diff --git a/src/approx_itensornetwork/binary_tree_partition.jl b/src/approx_itensornetwork/binary_tree_partition.jl
index 4a7b5846..c4ee00a8 100644
--- a/src/approx_itensornetwork/binary_tree_partition.jl
+++ b/src/approx_itensornetwork/binary_tree_partition.jl
@@ -1,3 +1,8 @@
+using DataGraphs: DataGraph
+using ITensors: Index, ITensor, delta, noncommoninds, replaceinds, sim
+using ITensors.NDTensors: Algorithm, @Algorithm_str
+using NamedGraphs: disjoint_union, rename_vertices, subgraph
+
 function _binary_partition(tn::ITensorNetwork, source_inds::Vector{<:Index})
   external_inds = noncommoninds(Vector{ITensor}(tn)...)
   # add delta tensor to each external ind
diff --git a/src/caches/beliefpropagationcache.jl b/src/caches/beliefpropagationcache.jl
index fa9ea51e..43fe3dc3 100644
--- a/src/caches/beliefpropagationcache.jl
+++ b/src/caches/beliefpropagationcache.jl
@@ -1,3 +1,6 @@
+using ITensors.ITensorMPS: ITensorMPS
+using NamedGraphs: boundary_partitionedges
+
 default_message(inds_e) = ITensor[denseblocks(delta(inds_e))]
 default_messages(ptn::PartitionedGraph) = Dictionary()
 function default_message_update(contract_list::Vector{ITensor}; kwargs...)
@@ -57,7 +60,7 @@ for f in [
   :(NamedGraphs.partitionvertices),
   :(NamedGraphs.vertices),
   :(NamedGraphs.boundary_partitionedges),
-  :linkinds,
+  :(ITensorMPS.linkinds),
 ]
   @eval begin
     function $f(bp_cache::BeliefPropagationCache, args...; kwargs...)
@@ -80,7 +83,7 @@ function messages(
   return [message(bp_cache, edge; kwargs...) for edge in edges]
 end
 
-function copy(bp_cache::BeliefPropagationCache)
+function Base.copy(bp_cache::BeliefPropagationCache)
   return BeliefPropagationCache(
     copy(partitioned_itensornetwork(bp_cache)),
     copy(messages(bp_cache)),
diff --git a/src/contract.jl b/src/contract.jl
index f4e74603..44054b80 100644
--- a/src/contract.jl
+++ b/src/contract.jl
@@ -1,15 +1,21 @@
-function contract(tn::AbstractITensorNetwork; alg::String="exact", kwargs...)
+using NamedGraphs: vertex_to_parent_vertex
+using ITensors: ITensor
+using ITensors.ContractionSequenceOptimization: deepmap
+using ITensors.NDTensors: NDTensors, Algorithm, @Algorithm_str, contract
+using LinearAlgebra: normalize!
+
+function NDTensors.contract(tn::AbstractITensorNetwork; alg::String="exact", kwargs...)
   return contract(Algorithm(alg), tn; kwargs...)
 end
 
-function contract(
+function NDTensors.contract(
   alg::Algorithm"exact", tn::AbstractITensorNetwork; sequence=vertices(tn), kwargs...
 )
   sequence_linear_index = deepmap(v -> vertex_to_parent_vertex(tn, v), sequence)
   return contract(Vector{ITensor}(tn); sequence=sequence_linear_index, kwargs...)
 end
 
-function contract(
+function NDTensors.contract(
   alg::Union{Algorithm"density_matrix",Algorithm"ttn_svd"},
   tn::AbstractITensorNetwork;
   output_structure::Function=path_graph_structure,
diff --git a/src/contraction_sequences.jl b/src/contraction_sequences.jl
index c32239a5..aca2254b 100644
--- a/src/contraction_sequences.jl
+++ b/src/contraction_sequences.jl
@@ -1,3 +1,9 @@
+using Graphs: vertices
+using ITensors: ITensor, contract
+using ITensors.ContractionSequenceOptimization: deepmap, optimal_contraction_sequence
+using ITensors.NDTensors: Algorithm, @Algorithm_str
+using NamedGraphs: Key
+
 function contraction_sequence(tn::Vector{ITensor}; alg="optimal", kwargs...)
   return contraction_sequence(Algorithm(alg), tn; kwargs...)
 end
diff --git a/src/expect.jl b/src/expect.jl
index 7feb2e11..5d96320f 100644
--- a/src/expect.jl
+++ b/src/expect.jl
@@ -1,4 +1,6 @@
-function expect(
+using ITensors.ITensorMPS: ITensorMPS
+
+function ITensorMPS.expect(
   op::String,
   ψ::AbstractITensorNetwork;
   cutoff=nothing,
@@ -23,7 +25,7 @@ function expect(
   return res
 end
 
-function expect(
+function ITensorMPS.expect(
   ℋ::OpSum,
   ψ::AbstractITensorNetwork;
   cutoff=nothing,
@@ -43,7 +45,7 @@ function expect(
   return ψh⃗ψ / ψψ
 end
 
-function expect(
+function ITensorMPS.expect(
   opsum_sum::Sum{<:OpSum},
   ψ::AbstractITensorNetwork;
   cutoff=nothing,
diff --git a/src/exports.jl b/src/exports.jl
index 09dbc3bd..1471287b 100644
--- a/src/exports.jl
+++ b/src/exports.jl
@@ -32,7 +32,7 @@ export Key,
   post_order_dfs_edges,
   leaf_vertices,
   is_leaf,
-  incident_edges,
+  incident_edges, # TODO: Remove this export.
   comb_tree,
   named_comb_tree,
   subgraph,
@@ -81,7 +81,6 @@ export AbstractITensorNetwork,
   ProjOuterProdTTN,
   set_nsite,
   position,
-  finite_state_machine,
   # contraction_sequences.jl
   contraction_sequence,
   # utils.jl
diff --git a/src/formnetworks/abstractformnetwork.jl b/src/formnetworks/abstractformnetwork.jl
index f0557ac6..d16f1f7c 100644
--- a/src/formnetworks/abstractformnetwork.jl
+++ b/src/formnetworks/abstractformnetwork.jl
@@ -7,7 +7,7 @@ abstract type AbstractFormNetwork{V} <: AbstractITensorNetwork{V} end
 #Needed for interface
 dual_index_map(f::AbstractFormNetwork) = not_implemented()
 tensornetwork(f::AbstractFormNetwork) = not_implemented()
-copy(f::AbstractFormNetwork) = not_implemented()
+Base.copy(f::AbstractFormNetwork) = not_implemented()
 operator_vertex_suffix(f::AbstractFormNetwork) = not_implemented()
 bra_vertex_suffix(f::AbstractFormNetwork) = not_implemented()
 ket_vertex_suffix(f::AbstractFormNetwork) = not_implemented()
diff --git a/src/formnetworks/bilinearformnetwork.jl b/src/formnetworks/bilinearformnetwork.jl
index 5519c1e3..e55f74b6 100644
--- a/src/formnetworks/bilinearformnetwork.jl
+++ b/src/formnetworks/bilinearformnetwork.jl
@@ -34,7 +34,7 @@ tensornetwork(blf::BilinearFormNetwork) = blf.tensornetwork
 data_graph_type(::Type{<:BilinearFormNetwork}) = data_graph_type(tensornetwork(blf))
 data_graph(blf::BilinearFormNetwork) = data_graph(tensornetwork(blf))
 
-function copy(blf::BilinearFormNetwork)
+function Base.copy(blf::BilinearFormNetwork)
   return BilinearFormNetwork(
     copy(tensornetwork(blf)),
     operator_vertex_suffix(blf),
diff --git a/src/formnetworks/quadraticformnetwork.jl b/src/formnetworks/quadraticformnetwork.jl
index 8aac841a..a5dfca5a 100644
--- a/src/formnetworks/quadraticformnetwork.jl
+++ b/src/formnetworks/quadraticformnetwork.jl
@@ -28,7 +28,7 @@ end
 dual_index_map(qf::QuadraticFormNetwork) = qf.dual_index_map
 dual_inv_index_map(qf::QuadraticFormNetwork) = qf.dual_inv_index_map
 
-function copy(qf::QuadraticFormNetwork)
+function Base.copy(qf::QuadraticFormNetwork)
   return QuadraticFormNetwork(
     copy(bilinear_formnetwork(qf)), dual_index_map(qf), dual_inv_index_map(qf)
   )
diff --git a/src/gauging.jl b/src/gauging.jl
index 89a30555..73b7f6eb 100644
--- a/src/gauging.jl
+++ b/src/gauging.jl
@@ -1,3 +1,5 @@
+using ITensors.NDTensors: scalartype
+
 function default_bond_tensors(ψ::ITensorNetwork)
   return DataGraph{vertextype(ψ),Nothing,ITensor}(underlying_graph(ψ))
 end
@@ -15,7 +17,7 @@ function data_graph_type(TN::Type{<:VidalITensorNetwork})
   return data_graph_type(fieldtype(TN, :itensornetwork))
 end
 data_graph(ψ::VidalITensorNetwork) = data_graph(site_tensors(ψ))
-function copy(ψ::VidalITensorNetwork)
+function Base.copy(ψ::VidalITensorNetwork)
   return VidalITensorNetwork(copy(site_tensors(ψ)), copy(bond_tensors(ψ)))
 end
diff --git a/src/graphs.jl b/src/graphs.jl
index dbd68bca..bce2c90d 100644
--- a/src/graphs.jl
+++ b/src/graphs.jl
@@ -1,4 +1,7 @@
-function SimpleGraph(itensors::Vector{ITensor})
+using Graphs.SimpleGraphs: SimpleGraphs, SimpleGraph
+using ITensors: ITensor, hascommoninds
+
+function SimpleGraphs.SimpleGraph(itensors::Vector{ITensor})
   nv_graph = length(itensors)
   graph = SimpleGraph(nv_graph)
   for i in 1:(nv_graph - 1), j in (i + 1):nv_graph
diff --git a/src/imports.jl b/src/imports.jl
deleted file mode 100644
index db109a99..00000000
--- a/src/imports.jl
+++ /dev/null
@@ -1,108 +0,0 @@
-import Base:
-  # types
-  Vector,
-  # functions
-  convert,
-  copy,
-  eltype,
-  getindex,
-  hvncat,
-  setindex!,
-  show,
-  isapprox,
-  isassigned,
-  iterate,
-  union,
-  +
-
-import NamedGraphs:
-  vertextype,
-  convert_vertextype,
-  vertex_to_parent_vertex,
-  rename_vertices,
-  disjoint_union,
-  mincut_partitions,
-  incident_edges,
-  boundary_partitionedges
-
-import .DataGraphs:
-  underlying_graph,
-  underlying_graph_type,
-  vertex_data,
-  edge_data,
-  edge_data_type,
-  reverse_data_direction
-
-import Graphs: SimpleGraph, is_directed, weights
-
-import KrylovKit: eigsolve, linsolve
-
-import LinearAlgebra: factorize, normalize, normalize!, qr, svd
-
-import Observers: update!
-
-import ITensors:
-  # contraction
-  apply,
-  contract,
-  dmrg,
-  orthogonalize,
-  isortho,
-  inner,
-  loginner,
-  norm,
-  lognorm,
-  expect,
-  # truncation
-  truncate,
-  replacebond!,
-  replacebond,
-  # site and link indices
-  siteind,
-  siteinds,
-  linkinds,
-  # index set functions
-  uniqueinds,
-  commoninds,
-  replaceinds,
-  hascommoninds,
-  # priming and tagging
-  adjoint,
-  sim,
-  prime,
-  setprime,
-  noprime,
-  replaceprime,
-  addtags,
-  removetags,
-  replacetags,
-  settags,
-  tags,
-  # dag
-  dag,
-  # permute
-  permute,
-  #commoninds
-  hascommoninds,
-  # linkdims
-  linkdim,
-  linkdims,
-  maxlinkdim,
-  # projected operators
-  product,
-  nsite,
-  # promotion and conversion
-  promote_itensor_eltype,
-  scalartype,
-  #adding
-  add
-
-import ITensors.LazyApply:
-  # extracting terms from a sum
-  terms
-#Algorithm
-Algorithm
-
-using ITensors.ContractionSequenceOptimization: deepmap
-
-import ITensors.ITensorVisualizationCore: visualize
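Reviewer note: the deleted `imports.jl` centralized `import` statements so that bare method definitions extended other modules' functions; the refactor instead qualifies the owning module at each definition site. A minimal, self-contained sketch of the two styles (toy modules, not package code):

```julia
module ImportStyle
import Base: copy
struct T end
copy(::T) = T()        # extends Base.copy via the import list
end

module QualifiedStyle
struct T end
Base.copy(::T) = T()   # extends Base.copy with no import line
end

@show Base.copy(QualifiedStyle.T()) isa QualifiedStyle.T  # true
```

The qualified style makes each file self-describing about what it extends, which is what the per-file `using` blocks added throughout this diff accomplish.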
diff --git a/src/indsnetwork.jl b/src/indsnetwork.jl
index 06469133..a017f64c 100644
--- a/src/indsnetwork.jl
+++ b/src/indsnetwork.jl
@@ -1,3 +1,10 @@
+using DataGraphs: DataGraphs, vertex_data
+using Graphs: Graphs
+using Graphs.SimpleGraphs: AbstractSimpleGraph
+using ITensors: Index, dag
+using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize
+using NamedGraphs: NamedGraphs, AbstractNamedGraph, NamedEdge, NamedGraph, vertextype
+
 struct IndsNetwork{V,I} <: AbstractIndsNetwork{V,I}
   data_graph::DataGraph{V,Vector{I},Vector{I},NamedGraph{V},NamedEdge{V}}
   global function _IndsNetwork(V::Type, I::Type, g::DataGraph)
@@ -7,10 +14,10 @@ end
 indtype(inds_network::IndsNetwork) = indtype(typeof(inds_network))
 indtype(::Type{<:IndsNetwork{V,I}}) where {V,I} = I
 data_graph(is::IndsNetwork) = is.data_graph
-underlying_graph(is::IndsNetwork) = underlying_graph(data_graph(is))
-vertextype(::Type{<:IndsNetwork{V}}) where {V} = V
-underlying_graph_type(G::Type{<:IndsNetwork}) = NamedGraph{vertextype(G)}
-is_directed(::Type{<:IndsNetwork}) = false
+DataGraphs.underlying_graph(is::IndsNetwork) = underlying_graph(data_graph(is))
+NamedGraphs.vertextype(::Type{<:IndsNetwork{V}}) where {V} = V
+DataGraphs.underlying_graph_type(G::Type{<:IndsNetwork}) = NamedGraph{vertextype(G)}
+Graphs.is_directed(::Type{<:IndsNetwork}) = false
 
 #
 # Constructor
@@ -18,7 +25,7 @@ is_directed(::Type{<:IndsNetwork}) = false
 
 # When setting an edge with collections of `Index`, set the reverse direction
 # edge with the `dag`.
-function reverse_data_direction(
+function DataGraphs.reverse_data_direction(
   inds_network::IndsNetwork, is::Union{Index,Tuple{Vararg{Index}},Vector{<:Index}}
 )
   return dag(is)
@@ -300,7 +307,7 @@ end
 # Utility
 #
 
-copy(is::IndsNetwork) = IndsNetwork(copy(data_graph(is)))
+Base.copy(is::IndsNetwork) = IndsNetwork(copy(data_graph(is)))
 
 function map_inds(f, is::IndsNetwork, args...; sites=nothing, links=nothing, kwargs...)
   return map_data(i -> f(i, args...; kwargs...), is; vertices=sites, edges=links)
@@ -310,6 +317,6 @@ end
 # Visualization
 #
 
-function visualize(is::IndsNetwork, args...; kwargs...)
+function ITensorVisualizationCore.visualize(is::IndsNetwork, args...; kwargs...)
   return visualize(ITensorNetwork(is), args...; kwargs...)
 end
diff --git a/src/itensornetwork.jl b/src/itensornetwork.jl
index a37b0ac1..d67da2a9 100644
--- a/src/itensornetwork.jl
+++ b/src/itensornetwork.jl
@@ -1,3 +1,7 @@
+using DataGraphs: DataGraphs, DataGraph
+using ITensors: ITensor
+using NamedGraphs: NamedGraphs, NamedEdge, NamedGraph, vertextype
+
 struct Private end
 
 """
@@ -17,7 +21,7 @@ end
 data_graph(tn::ITensorNetwork) = getfield(tn, :data_graph)
 data_graph_type(TN::Type{<:ITensorNetwork}) = fieldtype(TN, :data_graph)
 
-function underlying_graph_type(TN::Type{<:ITensorNetwork})
+function DataGraphs.underlying_graph_type(TN::Type{<:ITensorNetwork})
   return fieldtype(data_graph_type(TN), :underlying_graph)
 end
@@ -44,10 +48,10 @@ function ITensorNetwork{V}(tn::AbstractITensorNetwork) where {V}
 end
 ITensorNetwork(tn::AbstractITensorNetwork) = ITensorNetwork{vertextype(tn)}(tn)
 
-convert_vertextype(::Type{V}, tn::ITensorNetwork{V}) where {V} = tn
-convert_vertextype(V::Type, tn::ITensorNetwork) = ITensorNetwork{V}(tn)
+NamedGraphs.convert_vertextype(::Type{V}, tn::ITensorNetwork{V}) where {V} = tn
+NamedGraphs.convert_vertextype(V::Type, tn::ITensorNetwork) = ITensorNetwork{V}(tn)
 
-copy(tn::ITensorNetwork) = ITensorNetwork(copy(data_graph(tn)))
+Base.copy(tn::ITensorNetwork) = ITensorNetwork(copy(data_graph(tn)))
 
 #
 # Construction from collections of ITensors
@@ -266,6 +270,6 @@ end
 
 ITensorNetwork(itns::Vector{ITensorNetwork}) = reduce(⊗, itns)
 
-function Vector{ITensor}(ψ::ITensorNetwork)
+function Base.Vector{ITensor}(ψ::ITensorNetwork)
   return ITensor[ψ[v] for v in vertices(ψ)]
 end
diff --git a/src/itensors.jl b/src/itensors.jl
index a5c9f77b..3d747904 100644
--- a/src/itensors.jl
+++ b/src/itensors.jl
@@ -1,3 +1,8 @@
+using NamedGraphs: Key
+using ITensors: ITensors, Index, ITensor, QN, inds, op, replaceinds, uniqueinds
+using ITensors.NDTensors: NDTensors
+using Dictionaries: Dictionary
+
 # Tensor sum: `A ⊞ B = A ⊗ Iᴮ + Iᴬ ⊗ B`
 # https://github.com/JuliaLang/julia/issues/13333#issuecomment-143825995
 # "PRESERVATION OF TENSOR SUM AND TENSOR PRODUCT"
@@ -59,8 +64,8 @@ function ITensors.replaceinds(tensor::ITensor, ind_to_newind::Dict{<:Index,<:Ind
   return replaceinds(tensor, subset_inds => out_inds)
 end
 
-is_delta(it::ITensor) = is_delta(ITensors.tensor(it))
-is_delta(t::ITensors.Tensor) = false
-function is_delta(t::ITensors.NDTensors.UniformDiagTensor)
-  return isone(ITensors.NDTensors.getdiagindex(t, 1))
+is_delta(it::ITensor) = is_delta(NDTensors.tensor(it))
+is_delta(t::NDTensors.Tensor) = false
+function is_delta(t::NDTensors.UniformDiagTensor)
+  return isone(NDTensors.getdiagindex(t, 1))
 end
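Reviewer note: a matrix analogue of the tensor sum `⊞` referenced in the comment at the top of `src/itensors.jl` (`A ⊞ B = A ⊗ Iᴮ + Iᴬ ⊗ B`), written with Kronecker products:

```julia
using LinearAlgebra

A = [0.0 1.0; 1.0 0.0]
B = [1.0 0.0; 0.0 -1.0]
# Tensor (Kronecker) sum: A acts on the first factor, B on the second.
A_boxplus_B = kron(A, I(2)) + kron(I(2), B)
@show size(A_boxplus_B)  # (4, 4)
```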
diff --git a/src/mincut.jl b/src/mincut.jl
index 067dc045..edb40ab8 100644
--- a/src/mincut.jl
+++ b/src/mincut.jl
@@ -1,3 +1,6 @@
+using AbstractTrees: Leaves, PostOrderDFS
+using Combinatorics: powerset
+
 # a large number to prevent this edge being a cut
 MAX_WEIGHT = 1e32
diff --git a/src/observers.jl b/src/observers.jl
index 5e28433a..d4ff8945 100644
--- a/src/observers.jl
+++ b/src/observers.jl
@@ -1,4 +1,4 @@
 """
 Overload of `Observers.update!`.
 """
-update!(::Nothing; kwargs...) = nothing
+Observers.update!(::Nothing; kwargs...) = nothing
diff --git a/src/partitioneditensornetwork.jl b/src/partitioneditensornetwork.jl
index 8c00d3db..b23a9af2 100644
--- a/src/partitioneditensornetwork.jl
+++ b/src/partitioneditensornetwork.jl
@@ -1,4 +1,8 @@
-function linkinds(pitn::PartitionedGraph, edge::PartitionEdge)
+using ITensors: commoninds
+using ITensors.ITensorMPS: ITensorMPS
+using NamedGraphs: PartitionedGraph, PartitionEdge, subgraph
+
+function ITensorMPS.linkinds(pitn::PartitionedGraph, edge::PartitionEdge)
   src_e_itn = subgraph(pitn, src(edge))
   dst_e_itn = subgraph(pitn, dst(edge))
   return commoninds(src_e_itn, dst_e_itn)
diff --git a/src/sitetype.jl b/src/sitetype.jl
index a807f16b..e1e0fa88 100644
--- a/src/sitetype.jl
+++ b/src/sitetype.jl
@@ -1,15 +1,15 @@
-function siteind(sitetype::String, v::Tuple; kwargs...)
+function ITensors.siteind(sitetype::String, v::Tuple; kwargs...)
   return addtags(siteind(sitetype; kwargs...), ITensorNetworks.vertex_tag(v))
 end
 
 # naming collision of ITensors.addtags and addtags keyword in siteind system
-function siteind(d::Integer, v; addtags="", kwargs...)
+function ITensors.siteind(d::Integer, v; addtags="", kwargs...)
   return ITensors.addtags(
     Index(d; tags="Site, $addtags", kwargs...), ITensorNetworks.vertex_tag(v)
   )
 end
 
-function siteinds(sitetypes::AbstractDictionary, g::AbstractGraph; kwargs...)
+function ITensors.siteinds(sitetypes::AbstractDictionary, g::AbstractGraph; kwargs...)
   is = IndsNetwork(g)
   for v in vertices(g)
     is[v] = [siteind(sitetypes[v], vertex_tag(v); kwargs...)]
@@ -17,10 +17,10 @@ end
   return is
 end
 
-function siteinds(sitetype, g::AbstractGraph; kwargs...)
+function ITensors.siteinds(sitetype, g::AbstractGraph; kwargs...)
   return siteinds(Dictionary(vertices(g), fill(sitetype, nv(g))), g; kwargs...)
 end
 
-function siteinds(f::Function, g::AbstractGraph; kwargs...)
+function ITensors.siteinds(f::Function, g::AbstractGraph; kwargs...)
   return siteinds(Dictionary(vertices(g), map(v -> f(v), vertices(g))), g; kwargs...)
 end
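Reviewer note: hypothetical usage of the graph-based `siteinds` overloads defined above (graph constructor from NamedGraphs.jl; exact vertex types and constructor names may differ across versions):

```julia
using ITensors, ITensorNetworks, NamedGraphs

g = named_grid((2, 2))
s_uniform = siteinds("S=1/2", g)  # the same site type on every vertex
s_mixed = siteinds(v -> iseven(sum(v)) ? "S=1/2" : "S=1", g)  # per-vertex choice
```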
return state, projected_operator diff --git a/src/solvers/contract.jl b/src/solvers/contract.jl index 34fa78a7..7a9fb2d9 100644 --- a/src/solvers/contract.jl +++ b/src/solvers/contract.jl @@ -1,3 +1,8 @@ +using Graphs: nv, vertices +using ITensors: ITensors, linkinds, sim +using ITensors.NDTensors: Algorithm, @Algorithm_str, contract +using NamedGraphs: vertextype + function sum_contract( ::Algorithm"fit", tns::Vector{<:Tuple{<:AbstractTTN,<:AbstractTTN}}; @@ -50,21 +55,23 @@ function sum_contract( return alternating_update(operator, init; nsweeps, nsites, updater, cutoff, kwargs...) end -function contract(a::Algorithm"fit", tn1::AbstractTTN, tn2::AbstractTTN; kwargs...) +function NDTensors.contract( + a::Algorithm"fit", tn1::AbstractTTN, tn2::AbstractTTN; kwargs... +) return sum_contract(a, [(tn1, tn2)]; kwargs...) end """ Overload of `ITensors.contract`. """ -function contract(tn1::AbstractTTN, tn2::AbstractTTN; alg="fit", kwargs...) +function NDTensors.contract(tn1::AbstractTTN, tn2::AbstractTTN; alg="fit", kwargs...) return contract(Algorithm(alg), tn1, tn2; kwargs...) end """ Overload of `ITensors.apply`. """ -function apply(tn1::AbstractTTN, tn2::AbstractTTN; init, kwargs...) +function ITensors.apply(tn1::AbstractTTN, tn2::AbstractTTN; init, kwargs...) if !isone(plev_diff(flatten_external_indsnetwork(tn1, tn2), external_indsnetwork(init))) error( "Initial guess `init` needs to primelevel one less than the contraction tn1 and tn2." diff --git a/src/solvers/dmrg.jl b/src/solvers/dmrg.jl index 271832d6..464be0ce 100644 --- a/src/solvers/dmrg.jl +++ b/src/solvers/dmrg.jl @@ -1,12 +1,17 @@ +using ITensors.ITensorMPS: ITensorMPS, dmrg +using KrylovKit: KrylovKit + """ -Overload of `ITensors.dmrg`. +Overload of `ITensors.ITensorMPS.dmrg`. """ -function dmrg(operator, init_state; nsweeps, nsites=2, updater=eigsolve_updater, kwargs...) +function ITensorMPS.dmrg( + operator, init_state; nsweeps, nsites=2, updater=eigsolve_updater, kwargs... +) return alternating_update(operator, init_state; nsweeps, nsites, updater, kwargs...) end """ Overload of `KrylovKit.eigsolve`. """ -eigsolve(H, init::AbstractTTN; kwargs...) = dmrg(H, init; kwargs...) +KrylovKit.eigsolve(H, init::AbstractTTN; kwargs...) = dmrg(H, init; kwargs...) diff --git a/src/solvers/linsolve.jl b/src/solvers/linsolve.jl index 154c8f9f..50577905 100644 --- a/src/solvers/linsolve.jl +++ b/src/solvers/linsolve.jl @@ -1,3 +1,4 @@ +using KrylovKit: KrylovKit """ $(TYPEDSIGNATURES) @@ -22,7 +23,7 @@ Keyword arguments: Overload of `KrylovKit.linsolve`. """ -function linsolve( +function KrylovKit.linsolve( A::AbstractTTN, b::AbstractTTN, x₀::AbstractTTN, diff --git a/src/tensornetworkoperators.jl b/src/tensornetworkoperators.jl index e515685b..c1ae29da 100644 --- a/src/tensornetworkoperators.jl +++ b/src/tensornetworkoperators.jl @@ -1,3 +1,6 @@ +using ITensors: ITensors, commoninds, product +using LinearAlgebra: factorize + """ Take a vector of gates which act on different edges/ vertices of an Inds network and construct the tno which represents prod(gates). 
""" diff --git a/src/treetensornetworks/abstracttreetensornetwork.jl b/src/treetensornetworks/abstracttreetensornetwork.jl index 052c3028..5a4c9808 100644 --- a/src/treetensornetworks/abstracttreetensornetwork.jl +++ b/src/treetensornetworks/abstracttreetensornetwork.jl @@ -4,7 +4,7 @@ abstract type AbstractTreeTensorNetwork{V} <: AbstractITensorNetwork{V} end const AbstractTTN = AbstractTreeTensorNetwork -function underlying_graph_type(G::Type{<:AbstractTTN}) +function DataGraphs.underlying_graph_type(G::Type{<:AbstractTTN}) return underlying_graph_type(data_graph_type(G)) end @@ -24,7 +24,7 @@ end # Orthogonality center # -isortho(ψ::AbstractTTN) = isone(length(ortho_center(ψ))) +ITensorMPS.isortho(ψ::AbstractTTN) = isone(length(ortho_center(ψ))) function set_ortho_center(ψ::AbstractTTN{V}, new_center::Vector{<:V}) where {V} return typeof(ψ)(itensor_network(ψ), new_center) @@ -89,7 +89,7 @@ end # Orthogonalization # -function orthogonalize(ψ::AbstractTTN{V}, root_vertex::V; kwargs...) where {V} +function ITensorMPS.orthogonalize(ψ::AbstractTTN{V}, root_vertex::V; kwargs...) where {V} (isortho(ψ) && only(ortho_center(ψ)) == root_vertex) && return ψ if isortho(ψ) edge_list = edge_path(ψ, only(ortho_center(ψ)), root_vertex) @@ -104,7 +104,7 @@ end # For ambiguity error -function orthogonalize(tn::AbstractTTN, edge::AbstractEdge; kwargs...) +function ITensorMPS.orthogonalize(tn::AbstractTTN, edge::AbstractEdge; kwargs...) return typeof(tn)(orthogonalize(ITensorNetwork(tn), edge; kwargs...)) end @@ -112,7 +112,7 @@ end # Truncation # -function truncate(ψ::AbstractTTN; root_vertex=default_root_vertex(ψ), kwargs...) +function Base.truncate(ψ::AbstractTTN; root_vertex=default_root_vertex(ψ), kwargs...) for e in post_order_dfs_edges(ψ, root_vertex) # always orthogonalize towards source first to make truncations controlled ψ = orthogonalize(ψ, src(e)) @@ -123,7 +123,7 @@ function truncate(ψ::AbstractTTN; root_vertex=default_root_vertex(ψ), kwargs.. end # For ambiguity error -function truncate(tn::AbstractTTN, edge::AbstractEdge; kwargs...) +function Base.truncate(tn::AbstractTTN, edge::AbstractEdge; kwargs...) return typeof(tn)(truncate(ITensorNetwork(tn), edge; kwargs...)) end @@ -132,7 +132,7 @@ end # # TODO: decide on contraction order: reverse dfs vertices or forward dfs edges? -function contract( +function NDTensors.contract( ψ::AbstractTTN{V}, root_vertex::V=default_root_vertex(ψ); kwargs... 
) where {V} ψ = copy(ψ) @@ -147,7 +147,9 @@ function contract( # return ψ[root_vertex] end -function inner(ϕ::AbstractTTN, ψ::AbstractTTN; root_vertex=default_root_vertex(ϕ, ψ)) +function ITensors.inner( + ϕ::AbstractTTN, ψ::AbstractTTN; root_vertex=default_root_vertex(ϕ, ψ) +) ϕᴴ = sim(dag(ϕ); sites=[]) ψ = sim(ψ; sites=[]) ϕψ = ϕᴴ ⊗ ψ @@ -165,7 +167,7 @@ function inner(ϕ::AbstractTTN, ψ::AbstractTTN; root_vertex=default_root_vertex return ϕψ[root_vertex, 1][] end -function norm(ψ::AbstractTTN) +function LinearAlgebra.norm(ψ::AbstractTTN) if isortho(ψ) return norm(ψ[only(ortho_center(ψ))]) end @@ -176,7 +178,7 @@ end # Utility # -function normalize!(ψ::AbstractTTN) +function LinearAlgebra.normalize!(ψ::AbstractTTN) c = ortho_center(ψ) lognorm_ψ = lognorm(ψ) if lognorm_ψ == -Inf @@ -189,7 +191,7 @@ function normalize!(ψ::AbstractTTN) return ψ end -function normalize(ψ::AbstractTTN) +function LinearAlgebra.normalize(ψ::AbstractTTN) return normalize!(copy(ψ)) end @@ -215,7 +217,7 @@ function LinearAlgebra.rmul!(ψ::AbstractTTN, α::Number) return _apply_to_orthocenter!(*, ψ, α) end -function lognorm(ψ::AbstractTTN) +function ITensorMPS.lognorm(ψ::AbstractTTN) if isortho(ψ) return log(norm(ψ[only(ortho_center(ψ))])) end @@ -233,7 +235,7 @@ function logdot(ψ1::TTNT, ψ2::TTNT; kwargs...) where {TTNT<:AbstractTTN} end # TODO: stick with this traversal or find optimal contraction sequence? -function loginner( +function ITensorMPS.loginner( ψ1::TTNT, ψ2::TTNT; root_vertex=default_root_vertex(ψ1, ψ2) )::Number where {TTNT<:AbstractTTN} N = nv(ψ1) @@ -331,7 +333,9 @@ function ITensors.add(tn1::AbstractTTN, tn2::AbstractTTN; kwargs...) end # TODO: Delete this -function permute(ψ::AbstractTTN, ::Tuple{typeof(linkind),typeof(siteinds),typeof(linkind)}) +function ITensors.permute( + ψ::AbstractTTN, ::Tuple{typeof(linkind),typeof(siteinds),typeof(linkind)} +) ψ̃ = copy(ψ) for v in vertices(ψ) ls = [only(linkinds(ψ, n => v)) for n in neighbors(ψ, v)] # TODO: won't work for multiple indices per link... 
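# Context for the `ITensorMPS.lognorm` and `ITensorMPS.loginner` overloads
# above: norms and overlaps of large networks accumulate one multiplicative
# factor per vertex, so they under- or overflow Float64 long before their
# logarithms do; working in log scale keeps them representable. A minimal
# plain-Julia illustration (the magnitudes are made up):
per_vertex_factors = fill(1e-30, 20)
prod(per_vertex_factors)       # 0.0: the product underflows Float64
sum(log, per_vertex_factors)   # ≈ -1381.55: the log-scale value stays finite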
@@ -365,7 +369,7 @@ end # # TODO: implement using multi-graph disjoint union -function inner( +function ITensors.inner( y::AbstractTTN, A::AbstractTTN, x::AbstractTTN; root_vertex=default_root_vertex(x, A, y) ) traversal_order = reverse(post_order_dfs_vertices(x, root_vertex)) @@ -379,7 +383,7 @@ function inner( end # TODO: implement using multi-graph disjoint -function inner( +function ITensors.inner( B::AbstractTTN, y::AbstractTTN, A::AbstractTTN, @@ -409,7 +413,7 @@ function inner( return O[] end -function expect( +function ITensorMPS.expect( operator::String, state::AbstractTTN; vertices=vertices(state), diff --git a/src/treetensornetworks/projttns/abstractprojttn.jl b/src/treetensornetworks/projttns/abstractprojttn.jl index 4a2bc175..63ff4bf7 100644 --- a/src/treetensornetworks/projttns/abstractprojttn.jl +++ b/src/treetensornetworks/projttns/abstractprojttn.jl @@ -1,12 +1,18 @@ +using DataGraphs: DataGraphs, underlying_graph +using Graphs: neighbors +using ITensors: ITensor, contract, order +using ITensors.ITensorMPS: ITensorMPS, nsite +using NamedGraphs: NamedGraphs, NamedEdge, incident_edges, vertextype + abstract type AbstractProjTTN{V} end environments(::AbstractProjTTN) = error("Not implemented") operator(::AbstractProjTTN) = error("Not implemented") pos(::AbstractProjTTN) = error("Not implemented") -underlying_graph(P::AbstractProjTTN) = error("Not implemented") +DataGraphs.underlying_graph(P::AbstractProjTTN) = error("Not implemented") -copy(::AbstractProjTTN) = error("Not implemented") +Base.copy(::AbstractProjTTN) = error("Not implemented") set_nsite(::AbstractProjTTN, nsite) = error("Not implemented") @@ -22,14 +28,14 @@ Graphs.edgetype(P::AbstractProjTTN) = edgetype(underlying_graph(P)) on_edge(P::AbstractProjTTN) = isa(pos(P), edgetype(P)) -nsite(P::AbstractProjTTN) = on_edge(P) ? 0 : length(pos(P)) +ITensorMPS.nsite(P::AbstractProjTTN) = on_edge(P) ? 
0 : length(pos(P)) function sites(P::AbstractProjTTN{V}) where {V} on_edge(P) && return V[] return pos(P) end -function incident_edges(P::AbstractProjTTN{V})::Vector{NamedEdge{V}} where {V} +function NamedGraphs.incident_edges(P::AbstractProjTTN{V})::Vector{NamedEdge{V}} where {V} on_edge(P) && return [pos(P), reverse(pos(P))] edges = [ [edgetype(P)(n => v) for n in setdiff(neighbors(underlying_graph(P), v), sites(P))] for @@ -67,11 +73,11 @@ end projected_operator_tensors(P::AbstractProjTTN) = error("Not implemented.") -function contract(P::AbstractProjTTN, v::ITensor) +function NDTensors.contract(P::AbstractProjTTN, v::ITensor) return foldl(*, projected_operator_tensors(P); init=v) end -function product(P::AbstractProjTTN, v::ITensor) +function ITensors.product(P::AbstractProjTTN, v::ITensor) Pv = contract(P, v) if order(Pv) != order(v) error( @@ -101,8 +107,8 @@ function Base.eltype(P::AbstractProjTTN)::Type return ElType end -vertextype(::Type{<:AbstractProjTTN{V}}) where {V} = V -vertextype(p::AbstractProjTTN) = vertextype(typeof(p)) +NamedGraphs.vertextype(::Type{<:AbstractProjTTN{V}}) where {V} = V +NamedGraphs.vertextype(p::AbstractProjTTN) = vertextype(typeof(p)) function Base.size(P::AbstractProjTTN)::Tuple{Int,Int} d = 1 diff --git a/src/treetensornetworks/projttns/projouterprodttn.jl b/src/treetensornetworks/projttns/projouterprodttn.jl index d507202e..74995c15 100644 --- a/src/treetensornetworks/projttns/projouterprodttn.jl +++ b/src/treetensornetworks/projttns/projouterprodttn.jl @@ -1,3 +1,6 @@ +using DataGraphs: DataGraphs +using NamedGraphs: incident_edges + struct ProjOuterProdTTN{V} <: AbstractProjTTN{V} pos::Union{Vector{<:V},NamedEdge{V}} internal_state::TTN{V} @@ -7,7 +10,7 @@ end environments(p::ProjOuterProdTTN) = p.environments operator(p::ProjOuterProdTTN) = p.operator -underlying_graph(p::ProjOuterProdTTN) = underlying_graph(operator(p)) +DataGraphs.underlying_graph(p::ProjOuterProdTTN) = underlying_graph(operator(p)) pos(p::ProjOuterProdTTN) = p.pos internal_state(p::ProjOuterProdTTN) = p.internal_state @@ -20,7 +23,7 @@ function ProjOuterProdTTN(internal_state::AbstractTTN, operator::AbstractTTN) ) end -function copy(P::ProjOuterProdTTN) +function Base.copy(P::ProjOuterProdTTN) return ProjOuterProdTTN( pos(P), copy(internal_state(P)), copy(operator(P)), copy(environments(P)) ) @@ -107,7 +110,7 @@ function contract_ket(P::ProjOuterProdTTN, v::ITensor) end # ToDo: verify conjugation etc. 
with complex AbstractTTN -function contract(P::ProjOuterProdTTN, x::ITensor) +function NDTensors.contract(P::ProjOuterProdTTN, x::ITensor) ket = contract_ket(P, ITensor(one(Bool))) return (dag(ket) * x) * ket end diff --git a/src/treetensornetworks/projttns/projttn.jl b/src/treetensornetworks/projttns/projttn.jl index 4d9636ab..7d86d6fb 100644 --- a/src/treetensornetworks/projttns/projttn.jl +++ b/src/treetensornetworks/projttns/projttn.jl @@ -1,3 +1,9 @@ +using DataGraphs: DataGraphs, underlying_graph +using Dictionaries: Dictionary +using Graphs: edgetype, vertices +using ITensors: ITensor +using NamedGraphs: NamedEdge, incident_edges + """ ProjTTN """ @@ -11,12 +17,12 @@ function ProjTTN(operator::TTN) return ProjTTN(vertices(operator), operator, Dictionary{edgetype(operator),ITensor}()) end -copy(P::ProjTTN) = ProjTTN(pos(P), copy(operator(P)), copy(environments(P))) +Base.copy(P::ProjTTN) = ProjTTN(pos(P), copy(operator(P)), copy(environments(P))) #accessors for fields environments(p::ProjTTN) = p.environments operator(p::ProjTTN) = p.operator -underlying_graph(P::ProjTTN) = underlying_graph(operator(P)) +DataGraphs.underlying_graph(P::ProjTTN) = underlying_graph(operator(P)) pos(P::ProjTTN) = P.pos # trivial if we choose to specify position as above; only kept to allow using alongside diff --git a/src/treetensornetworks/projttns/projttnsum.jl b/src/treetensornetworks/projttns/projttnsum.jl index b731b989..4abb8965 100644 --- a/src/treetensornetworks/projttns/projttnsum.jl +++ b/src/treetensornetworks/projttns/projttnsum.jl @@ -1,3 +1,7 @@ +using ITensors: ITensors, contract +using ITensors.LazyApply: LazyApply, terms +using NamedGraphs: NamedGraphs, incident_edges + """ ProjTTNSum """ @@ -9,10 +13,10 @@ struct ProjTTNSum{V,T<:AbstractProjTTN{V},Z<:Number} <: AbstractProjTTN{V} end end -terms(P::ProjTTNSum) = P.terms +LazyApply.terms(P::ProjTTNSum) = P.terms factors(P::ProjTTNSum) = P.factors -copy(P::ProjTTNSum) = ProjTTNSum(copy.(terms(P)), copy(factors(P))) +Base.copy(P::ProjTTNSum) = ProjTTNSum(copy.(terms(P)), copy(factors(P))) function ProjTTNSum(operators::Vector{<:AbstractProjTTN}) return ProjTTNSum(operators, fill(one(Bool), length(operators))) @@ -23,25 +27,25 @@ end on_edge(P::ProjTTNSum) = on_edge(terms(P)[1]) -nsite(P::ProjTTNSum) = nsite(terms(P)[1]) +ITensorMPS.nsite(P::ProjTTNSum) = nsite(terms(P)[1]) function set_nsite(Ps::ProjTTNSum, nsite) return ProjTTNSum(map(p -> set_nsite(p, nsite), terms(Ps)), factors(Ps)) end -underlying_graph(P::ProjTTNSum) = underlying_graph(terms(P)[1]) +DataGraphs.underlying_graph(P::ProjTTNSum) = underlying_graph(terms(P)[1]) Base.length(P::ProjTTNSum) = length(terms(P)[1]) sites(P::ProjTTNSum) = sites(terms(P)[1]) -incident_edges(P::ProjTTNSum) = incident_edges(terms(P)[1]) +NamedGraphs.incident_edges(P::ProjTTNSum) = incident_edges(terms(P)[1]) internal_edges(P::ProjTTNSum) = internal_edges(terms(P)[1]) -product(P::ProjTTNSum, v::ITensor) = noprime(contract(P, v)) +ITensors.product(P::ProjTTNSum, v::ITensor) = noprime(contract(P, v)) -function contract(P::ProjTTNSum, v::ITensor) +function ITensors.contract(P::ProjTTNSum, v::ITensor) res = mapreduce(+, zip(factors(P), terms(P))) do (f, p) f * contract(p, v) end diff --git a/src/treetensornetworks/ttn.jl b/src/treetensornetworks/ttn.jl index 96b96d34..09296fbe 100644 --- a/src/treetensornetworks/ttn.jl +++ b/src/treetensornetworks/ttn.jl @@ -24,7 +24,7 @@ function data_graph_type(G::Type{<:TTN}) return data_graph_type(fieldtype(G, :itensor_network)) end -function copy(ψ::TTN) +function 
Base.copy(ψ::TTN) return TTN(copy(ψ.itensor_network), copy(ψ.ortho_center)) end @@ -174,7 +174,7 @@ end # Utility # -function replacebond!(T::TTN, edge::AbstractEdge, phi::ITensor; kwargs...) +function ITensorMPS.replacebond!(T::TTN, edge::AbstractEdge, phi::ITensor; kwargs...) ortho::String = get(kwargs, :ortho, "left") swapsites::Bool = get(kwargs, :swapsites, false) which_decomp::Union{String,Nothing} = get(kwargs, :which_decomp, nothing) @@ -203,10 +203,10 @@ function replacebond!(T::TTN, edge::AbstractEdge, phi::ITensor; kwargs...) return spec end -function replacebond!(T::TTN, edge::Pair, phi::ITensor; kwargs...) +function ITensorMPS.replacebond!(T::TTN, edge::Pair, phi::ITensor; kwargs...) return replacebond!(T, edgetype(T)(edge), phi; kwargs...) end -function replacebond(T0::TTN, args...; kwargs...) +function ITensorMPS.replacebond(T0::TTN, args...; kwargs...) return replacebond!(copy(T0), args...; kwargs...) end diff --git a/src/utils.jl b/src/utils.jl index c8f95045..6a82be30 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -33,6 +33,7 @@ end extend_or_truncate(x, length::Int) = extend_or_truncate([x], length) +using StructWalk: StructWalk, WalkStyle, postwalk # Treat `AbstractArray` as leaves. struct AbstractArrayLeafStyle <: WalkStyle end diff --git a/src/visualize.jl b/src/visualize.jl index 677c9fe9..5c0b17ff 100644 --- a/src/visualize.jl +++ b/src/visualize.jl @@ -1,5 +1,9 @@ -# ITensorVisualizationBase overload -function visualize( +using DataGraphs: AbstractDataGraph, underlying_graph +using Graphs: vertices +using ITensors.ITensorVisualizationCore: ITensorVisualizationCore, visualize +using NamedGraphs: AbstractNamedGraph, parent_graph + +function ITensorVisualizationCore.visualize( graph::AbstractNamedGraph, args...; vertex_labels_prefix=nothing, @@ -13,7 +17,6 @@ function visualize( return visualize(parent_graph(graph), args...; vertex_labels, kwargs...) end -# ITensorVisualizationBase overload -function visualize(graph::AbstractDataGraph, args...; kwargs...) +function ITensorVisualizationCore.visualize(graph::AbstractDataGraph, args...; kwargs...) return visualize(underlying_graph(graph), args...; kwargs...) 
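# (Dispatch chain for the two `visualize` overloads above: an
# `AbstractDataGraph` forwards to its `underlying_graph`, and an
# `AbstractNamedGraph` forwards to its `parent_graph` with vertex labels
# attached, so both graph-like types funnel into the same
# `ITensorVisualizationCore.visualize` backend.)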
end diff --git a/test/Project.toml b/test/Project.toml index 05386c5c..bf8c4591 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,7 +2,6 @@ AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" -DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5" Glob = "c27321d9-0574-5035-807b-f59d2c89b15c" @@ -19,6 +18,7 @@ Metis = "2679e427-3c69-5b7f-982b-ece356f1e94b" NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" +OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66" diff --git a/test/test_treetensornetworks/test_solvers/ITensorNetworksTestSolversUtils/ITensorNetworksTestSolversUtils.jl b/test/test_treetensornetworks/test_solvers/ITensorNetworksTestSolversUtils/ITensorNetworksTestSolversUtils.jl new file mode 100644 index 00000000..08393acb --- /dev/null +++ b/test/test_treetensornetworks/test_solvers/ITensorNetworksTestSolversUtils/ITensorNetworksTestSolversUtils.jl @@ -0,0 +1,3 @@ +module ITensorNetworksTestSolversUtils +include("solvers.jl") +end diff --git a/examples/treetensornetworks/solvers/03_solvers.jl b/test/test_treetensornetworks/test_solvers/ITensorNetworksTestSolversUtils/solvers.jl similarity index 75% rename from examples/treetensornetworks/solvers/03_solvers.jl rename to test/test_treetensornetworks/test_solvers/ITensorNetworksTestSolversUtils/solvers.jl index d8f00580..82924f74 100644 --- a/examples/treetensornetworks/solvers/03_solvers.jl +++ b/test/test_treetensornetworks/test_solvers/ITensorNetworksTestSolversUtils/solvers.jl @@ -1,6 +1,6 @@ -using DifferentialEquations -using ITensors -using ITensorNetworks +using OrdinaryDiffEq: ODEProblem, Tsit5, solve +using ITensors: ITensor +using ITensorNetworks: TimeDependentSum, to_vec using KrylovKit: exponentiate function ode_solver( @@ -17,13 +17,13 @@ function ode_solver( end time_span = (current_time, current_time + time_step) - u₀, ITensor_from_vec = to_vec(ψ₀) + u₀, itensor_from_vec = to_vec(ψ₀) f(ψ::ITensor, p, t) = H(t)(ψ) - f(u::Vector, p, t) = to_vec(f(ITensor_from_vec(u), p, t))[1] + f(u::Vector, p, t) = to_vec(f(itensor_from_vec(u), p, t))[1] prob = ODEProblem(f, u₀, time_span) sol = solve(prob, solver_alg; kwargs...) 
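# `to_vec(ψ₀)` above flattens the ITensor into the plain complex vector `u₀`
# that OrdinaryDiffEq integrates, and also returns `itensor_from_vec`, the
# inverse map back to an ITensor; `sol.u[end]` below is the flattened state
# at the end of `time_span`.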
uₜ = sol.u[end] - return ITensor_from_vec(uₜ), nothing + return itensor_from_vec(uₜ), nothing end function krylov_solver( diff --git a/test/test_treetensornetworks/test_solvers/Project.toml b/test/test_treetensornetworks/test_solvers/Project.toml new file mode 100644 index 00000000..3b8c43c1 --- /dev/null +++ b/test/test_treetensornetworks/test_solvers/Project.toml @@ -0,0 +1,10 @@ +[deps] +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +ITensorNetworks = "2919e153-833c-4bdc-8836-1ea460a35fc7" +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" +NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" +Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" +OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp.jl b/test/test_treetensornetworks/test_solvers/test_tdvp.jl index 9943caa2..1bc39a19 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp.jl @@ -1,12 +1,24 @@ -using ITensors -using ITensorNetworks -using ITensorNetworks: exponentiate_updater -using KrylovKit: exponentiate -using Observers -using Random -using Test - -#ToDo: Add tests for different signatures and functionality of extending the params +@eval module $(gensym()) +using Graphs: dst, edges, src +using ITensors: ITensor, contract, dag, inner, noprime, normalize, prime, scalar +using ITensorNetworks: + ITensorNetworks, + OpSum, + TTN, + apply, + expect, + mpo, + mps, + op, + random_mps, + random_ttn, + siteinds, + tdvp +using LinearAlgebra: norm +using NamedGraphs: named_binary_tree, named_comb_tree +using Observers: observer +using Test: @testset, @test + @testset "MPS TDVP" begin @testset "Basic TDVP" begin N = 10 @@ -193,7 +205,7 @@ using Test cutoff, normalize=false, updater_kwargs=(; tol=1e-12, maxiter=500, krylovdim=25), - updater=exponentiate_updater, + updater=ITensorNetworks.exponentiate_updater, ) # TODO: What should `expect` output? Right now # it outputs a dictionary. @@ -253,7 +265,6 @@ using Test for step in 1:Nsteps state = apply(gates, state; cutoff) - #normalize!(state) nsites = (step <= 3 ? 2 : 1) phi = tdvp( @@ -279,7 +290,7 @@ using Test phi = mps(s; states=(n -> isodd(n) ? 
"Up" : "Dn")) - obs = Observer( + obs = observer( "Sz" => (; state) -> expect("Sz", state; vertices=[c])[c], "En" => (; state) -> real(inner(state', H, state)), ) @@ -361,12 +372,12 @@ using Test measure_sz(; state) = expect("Sz", state; vertices=[c])[c] measure_en(; state) = real(inner(state', H, state)) - sweep_obs = Observer("Sz" => measure_sz, "En" => measure_en) + sweep_obs = observer("Sz" => measure_sz, "En" => measure_en) get_info(; info) = info step_measure_sz(; state) = expect("Sz", state; vertices=[c])[c] step_measure_en(; state) = real(inner(state', H, state)) - region_obs = Observer( + region_obs = observer( "Sz" => step_measure_sz, "En" => step_measure_en, "info" => get_info ) @@ -411,7 +422,7 @@ end H = TTN(os, s) - ψ0 = normalize!(random_ttn(s)) + ψ0 = normalize(random_ttn(s)) # Time evolve forward: ψ1 = tdvp(H, -0.1im, ψ0; root_vertex, nsweeps=1, cutoff, nsites=2) @@ -453,7 +464,7 @@ end H2 = TTN(os2, s) Hs = [H1, H2] - ψ0 = normalize!(random_ttn(s; link_space=10)) + ψ0 = normalize(random_ttn(s; link_space=10)) ψ1 = tdvp(Hs, -0.1im, ψ0; nsweeps=1, cutoff, nsites=1) @@ -564,7 +575,6 @@ end for step in 1:Nsteps state = apply(gates, state; cutoff, maxdim) - #normalize!(state) nsites = (step <= 3 ? 2 : 1) phi = tdvp( @@ -589,7 +599,7 @@ end # phi = TTN(s, v -> iseven(sum(isodd.(v))) ? "Up" : "Dn") - obs = Observer( + obs = observer( "Sz" => (; state) -> expect("Sz", state; vertices=[c])[c], "En" => (; state) -> real(inner(state', H, state)), ) @@ -622,7 +632,7 @@ end os = ITensorNetworks.heisenberg(c) H = TTN(os, s) - state = normalize!(random_ttn(s; link_space=2)) + state = normalize(random_ttn(s; link_space=2)) trange = 0.0:tau:ttotal for (step, t) in enumerate(trange) @@ -641,100 +651,5 @@ end @test inner(state', H, state) < -2.47 end - - # TODO: verify quantum number suport in ITensorNetworks - - # @testset "Observers" begin - # cutoff = 1e-12 - # tau = 0.1 - # ttotal = 1.0 - - # tooth_lengths = fill(2, 3) - # c = named_comb_tree(tooth_lengths) - # s = siteinds("S=1/2", c; conserve_qns=true) - - # os = ITensorNetworks.heisenberg(c) - # H = TTN(os, s) - - # c = (2, 2) - - # # - # # Using the ITensors observer system - # # - # struct TDVPObserver <: AbstractObserver end - - # Nsteps = convert(Int, ceil(abs(ttotal / tau))) - # Sz1 = zeros(Nsteps) - # En1 = zeros(Nsteps) - # function ITensors.measure!(obs::TDVPObserver; sweep, bond, half_sweep, psi, kwargs...) - # if bond == 1 && half_sweep == 2 - # Sz1[sweep] = expect("Sz", psi; vertices=[c])[c] - # En1[sweep] = real(inner(psi', H, psi)) - # end - # end - - # psi1 = productMPS(s, n -> isodd(n) ? "Up" : "Dn") - # tdvp( - # H, - # -im * ttotal, - # psi1; - # time_step=-im * tau, - # cutoff, - # normalize=false, - # (observer!)=TDVPObserver(), - # root_vertex=N, - # ) - - # # - # # Using Observers.jl - # # - - # function measure_sz(; psi, bond, half_sweep) - # if bond == 1 && half_sweep == 2 - # return expect("Sz", psi; vertices=[c])[c] - # end - # return nothing - # end - - # function measure_en(; psi, bond, half_sweep) - # if bond == 1 && half_sweep == 2 - # return real(inner(psi', H, psi)) - # end - # return nothing - # end - - # obs = Observer("Sz" => measure_sz, "En" => measure_en) - - # step_measure_sz(; psi) = expect("Sz", psi; vertices=[c])[c] - - # step_measure_en(; psi) = real(inner(psi', H, psi)) - - # step_obs = Observer("Sz" => step_measure_sz, "En" => step_measure_en) - - # psi2 = MPS(s, n -> isodd(n) ? 
"Up" : "Dn") - # tdvp( - # H, - # -im * ttotal, - # psi2; - # time_step=-im * tau, - # cutoff, - # normalize=false, - # (observer!)=obs, - # (step_observer!)=step_obs, - # root_vertex=N, - # ) - - # Sz2 = results(obs)["Sz"] - # En2 = results(obs)["En"] - - # Sz2_step = results(step_obs)["Sz"] - # En2_step = results(step_obs)["En"] - - # @test Sz1 ≈ Sz2 - # @test En1 ≈ En2 - # @test Sz1 ≈ Sz2_step - # @test En1 ≈ En2_step - # end end - -nothing +end diff --git a/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl b/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl index ba437270..872a6720 100644 --- a/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl +++ b/test/test_treetensornetworks/test_solvers/test_tdvp_time_dependent.jl @@ -1,16 +1,20 @@ -using DifferentialEquations -using ITensors -using ITensorNetworks: NamedGraphs.AbstractNamedEdge +@eval module $(gensym()) +using ITensors: contract +using ITensorNetworks: ITensorNetworks, TimeDependentSum, TTN, mpo, mps, siteinds, tdvp +using OrdinaryDiffEq: Tsit5 using KrylovKit: exponentiate -using LinearAlgebra -using Test - -const ttn_solvers_examples_dir = joinpath( - pkgdir(ITensorNetworks), "examples", "treetensornetworks", "solvers" +using LinearAlgebra: norm +using NamedGraphs: AbstractNamedEdge, named_comb_tree +using Test: @test, @test_broken, @testset + +include( + joinpath( + @__DIR__, "ITensorNetworksTestSolversUtils", "ITensorNetworksTestSolversUtils.jl" + ), ) -include(joinpath(ttn_solvers_examples_dir, "03_models.jl")) -include(joinpath(ttn_solvers_examples_dir, "03_solvers.jl")) +using .ITensorNetworksTestSolversUtils: + ITensorNetworksTestSolversUtils, krylov_solver, ode_solver # Functions need to be defined in global scope (outside # of the @testset macro) @@ -40,7 +44,7 @@ function ode_updater( ) region = first(sweep_plan[which_region_update]) (; time_step, t) = internal_kwargs - t = isa(region, ITensorNetworks.NamedGraphs.AbstractNamedEdge) ? t : t + time_step + t = isa(region, AbstractNamedEdge) ? t : t + time_step H⃗₀ = projected_operator![] result, info = ode_solver( @@ -64,7 +68,9 @@ end krylov_kwargs = (; tol=1e-8, krylovdim=15, eager=true) krylov_updater_kwargs = (; f=[f⃗], krylov_kwargs) -function krylov_solver(H⃗₀, ψ₀; time_step, ishermitian=false, issymmetric=false, kwargs...) +function ITensorNetworksTestSolversUtils.krylov_solver( + H⃗₀, ψ₀; time_step, ishermitian=false, issymmetric=false, kwargs... +) psi_t, info = krylov_solver( -im * TimeDependentSum(f⃗, H⃗₀), time_step, @@ -93,7 +99,7 @@ function krylov_updater( (; time_step, t) = internal_kwargs H⃗₀ = projected_operator![] region = first(sweep_plan[which_region_update]) - t = isa(region, ITensorNetworks.NamedGraphs.AbstractNamedEdge) ? t : t + time_step + t = isa(region, AbstractNamedEdge) ? t : t + time_step result, info = krylov_solver( -im * TimeDependentSum(f, H⃗₀), @@ -225,5 +231,4 @@ end @test ode_err < 1e-2 @test krylov_err < 1e-2 end - -nothing +end