Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[ITensorsNamedDimsArraysExt] Convert symmetric tensors #1578

Merged
merged 3 commits into from
Nov 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion NDTensors/Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <[email protected]>"]
version = "0.3.62"
version = "0.3.63"

[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
Expand Down
15 changes: 15 additions & 0 deletions NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,21 @@ function blockedunitrange_getindices(
return flip_blockvector(v)
end

# Fixes ambiguity error.
# TODO: Write this in terms of `blockedunitrange_getindices(dual(a), indices)`.
function blockedunitrange_getindices(
  a::GradedUnitRangeDual, indices::AbstractBlockVector{<:Block{1}}
)
  # Index the dual range block-by-block while keeping the nested block
  # structure of `indices`.
  blks = map(bs -> mortar(map(b -> a[b], bs)), blocks(indices))
  # We pass `labelled_length.(blks)` to `mortar` in order
  # to pass block labels to the axes of the output,
  # if they exist. This makes it so that
  # `only(axes(a[indices]))` is a `GradedUnitRange`
  # if `a isa GradedUnitRange`, for example.
  v = mortar(blks, labelled_length.(blks))
  return flip_blockvector(v)
end

function flip_blockvector(v::BlockVector)
block_axes = flip.(axes(v))
flipped = mortar(vec.(blocks(v)), block_axes)
Expand Down
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "ITensors"
uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
authors = ["Matthew Fishman <[email protected]>", "Miles Stoudenmire <[email protected]>"]
version = "0.7.4"
version = "0.7.5"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
Expand Down
3 changes: 1 addition & 2 deletions src/lib/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
using Adapt: adapt
using ITensors: MPO, dmrg, random_mps, siteinds
using ITensors.Ops: OpSum
using ITensorMPS: MPO, OpSum, dmrg, random_mps, siteinds
using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray

function main(; n, conserve_qns=false, nsweeps=3, cutoff=1e-4, arraytype=Array)
Expand Down
28 changes: 23 additions & 5 deletions src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
using ..NDTensors: data, inds
using ITensors: ITensor
using ..NDTensors: data, inds

# TODO: Delete this, it is a hack to decide
# if an Index is blocked.
Expand Down Expand Up @@ -34,21 +34,39 @@ function to_nameddimsarray(x::DiagTensor)
return named(DiagonalArray(data(x), size(x)), name.(inds(x)))
end

using ..NDTensors: BlockSparseTensor
using ITensors: ITensors, dir, qn
using ..NDTensors: BlockSparseTensor, array, blockdim, datatype, nblocks, nzblocks
using ..NDTensors.BlockSparseArrays: BlockSparseArray
using ..NDTensors.BlockSparseArrays.BlockArrays: BlockArrays, blockedrange
using ..NDTensors.GradedAxes: dual, gradedrange
using ..NDTensors.TypeParameterAccessors: set_ndims
# TODO: Delete once `BlockSparse` is removed.
# Convert a `BlockSparseTensor` to a named-dimensions wrapper around a
# `BlockSparseArray`, preserving the graded (QN-labelled) axis structure
# and copying only the structurally nonzero blocks.
# NOTE(review): the previous text interleaved stale pre-change statements
# (a lengths-only `blockinds`, a `BlockSparseArray{...}(blockinds)` call
# without `undef`, and an `x[b]` block assignment) with their replacements;
# the stale duplicates are removed here.
function to_nameddimsarray(x::BlockSparseTensor)
  # Build one graded axis per index, labelling each block by its QN.
  blockinds = map(inds(x)) do i
    r = gradedrange([qn(i, b) => blockdim(i, b) for b in 1:nblocks(i)])
    if dir(i) == ITensors.In
      # Incoming arrows correspond to dual spaces.
      return dual(r)
    end
    return r
  end
  blocktype = set_ndims(datatype(x), ndims(x))
  # TODO: Make a simpler constructor:
  # BlockSparseArray(blocktype, blockinds)
  arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(undef, blockinds)
  # Copy over just the structurally nonzero blocks.
  for b in nzblocks(x)
    arraystorage[BlockArrays.Block(Int.(Tuple(b))...)] = array(x[b])
  end
  return named(arraystorage, name.(inds(x)))
end

using ITensors: QN
using ..NDTensors.GradedAxes: GradedAxes
# Fusing two abelian `QN` sector labels amounts to adding their
# quantum numbers.
function GradedAxes.fuse_labels(l1::QN, l2::QN)
  return l1 + l2
end

using ITensors: QN
using ..NDTensors.SymmetrySectors: SymmetrySectors
# The dual of a `QN` sector label is its inverse, i.e. the negated
# quantum numbers.
function SymmetrySectors.dual(l::QN)
  return -l
end

## TODO: Add this back, define `CombinerArrays` library in NDTensors!
## using ..NDTensors: CombinerTensor, CombinerArray, storage
## # TODO: Delete when we directly use `CombinerArray` as storage.
Expand Down
5 changes: 5 additions & 0 deletions src/lib/ITensorsNamedDimsArraysExt/test/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
[deps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
33 changes: 33 additions & 0 deletions src/lib/ITensorsNamedDimsArraysExt/test/test_basics.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
@eval module $(gensym())
using BlockArrays: blocklengths
using ITensors: ITensor, Index, QN, dag, inds, plev, random_itensor
using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray
using NDTensors: tensor
using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored
using NDTensors.GradedAxes: isdual
using NDTensors.LabelledNumbers: label
using NDTensors.NamedDimsArrays: NamedDimsArray, unname
using Test: @test, @testset
# Check that converting a QN-conserving (block sparse) ITensor with
# `to_nameddimsarray` yields an ITensor backed by a `NamedDimsArray`
# wrapping a `BlockSparseArray`, preserving indices, duality of the
# graded axes, block sizes/labels, and contraction results.
@testset "to_nameddimsarray" begin
  i = Index([QN(0) => 2, QN(1) => 3])
  a = random_itensor(i', dag(i))
  b = to_nameddimsarray(a)
  @test b isa ITensor
  # Index identity and prime levels survive the conversion.
  @test plev(inds(b)[1]) == 1
  @test plev(inds(b)[2]) == 0
  @test inds(b)[1] == i'
  @test inds(b)[2] == dag(i)
  nb = tensor(b)
  @test nb isa NamedDimsArray{Float64}
  bb = unname(nb)
  @test bb isa BlockSparseArray{Float64}
  # The dagged index becomes a dual graded axis.
  @test !isdual(axes(bb, 1))
  @test isdual(axes(bb, 2))
  # Block sizes and QN labels are carried onto the axes
  # (the dual axis carries negated QNs).
  @test blocklengths(axes(bb, 1)) == [2, 3]
  @test blocklengths(axes(bb, 2)) == [2, 3]
  @test label.(blocklengths(axes(bb, 1))) == [QN(0), QN(1)]
  @test label.(blocklengths(axes(bb, 2))) == [QN(0), QN(-1)]
  # Only the two QN-allowed diagonal blocks are stored.
  @test block_nstored(bb) == 2
  # Contraction agrees with contracting first and converting after.
  @test b' * b ≈ to_nameddimsarray(a' * a)
end
end
16 changes: 14 additions & 2 deletions src/lib/ITensorsNamedDimsArraysExt/test/test_examples.jl
Original file line number Diff line number Diff line change
@@ -1,5 +1,17 @@
@eval module $(gensym())
using ITensors: ITensors
using Suppressor: @suppress
using Test: @testset

# Run the README example as a smoke test. The file is located via
# `pkgdir(ITensors)` so the test works regardless of the current working
# directory, and `@suppress` hides the example's printed output.
# NOTE(review): the previous text also contained the stale relative
# `include("../examples/example_readme.jl")` from before this change,
# which would run the example twice and break when run off-cwd; it is
# removed here.
@testset "examples" begin
  @suppress include(
    joinpath(
      pkgdir(ITensors),
      "src",
      "lib",
      "ITensorsNamedDimsArraysExt",
      "examples",
      "example_readme.jl",
    ),
  )
end
end
1 change: 1 addition & 0 deletions test/Project.toml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
[deps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a"
Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa"
Expand Down
4 changes: 4 additions & 0 deletions test/lib/ITensorsNamedDimsArraysExt/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
[deps]
ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
8 changes: 8 additions & 0 deletions test/lib/ITensorsNamedDimsArraysExt/runtests.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
@eval module $(gensym())
using ITensors: ITensors
# Forward to the library-local test suite; `pkgdir(ITensors)` makes the
# path independent of the current working directory.
include(
  joinpath(
    pkgdir(ITensors), "src", "lib", "ITensorsNamedDimsArraysExt", "test", "runtests.jl"
  ),
)
end
1 change: 1 addition & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ ITensors.disable_threaded_blocksparse()
"base",
"threading",
"lib/ContractionSequenceOptimization",
"lib/ITensorsNamedDimsArraysExt",
"ext/ITensorsChainRulesCoreExt",
"ext/ITensorsVectorInterfaceExt",
"ext/NDTensorsMappedArraysExt",
Expand Down
Loading