diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index b4edf4d5de..23a9edd645 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman <mfishman@flatironinstitute.org>"]
-version = "0.3.62"
+version = "0.3.63"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
diff --git a/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl
index 97c8a96d71..df65ed6155 100644
--- a/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl
+++ b/NDTensors/src/lib/GradedAxes/src/gradedunitrangedual.jl
@@ -94,6 +94,21 @@ function blockedunitrange_getindices(
   return flip_blockvector(v)
 end
 
+# Fixes an ambiguity error.
+# TODO: Write this in terms of `blockedunitrange_getindices(dual(a), indices)`.
+function blockedunitrange_getindices(
+  a::GradedUnitRangeDual, indices::AbstractBlockVector{<:Block{1}}
+)
+  blks = map(bs -> mortar(map(b -> a[b], bs)), blocks(indices))
+  # We pass `labelled_length.(blks)` to `mortar` in order
+  # to pass block labels to the axes of the output,
+  # if they exist. This makes it so that
+  # `only(axes(a[indices])) isa GradedUnitRange`
+  # if `a isa GradedUnitRange`, for example.
+  v = mortar(blks, labelled_length.(blks))
+  return flip_blockvector(v)
+end
+
 function flip_blockvector(v::BlockVector)
   block_axes = flip.(axes(v))
   flipped = mortar(vec.(blocks(v)), block_axes)
diff --git a/Project.toml b/Project.toml
index 1eecd3e40f..ec4584bc2f 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ITensors"
 uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
 authors = ["Matthew Fishman <mfishman@flatironinstitute.org>", "Miles Stoudenmire <mstoudenmire@flatironinstitute.org>"]
-version = "0.7.4"
+version = "0.7.5"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl b/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl
index c4d7b237fb..63f1591165 100644
--- a/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl
+++ b/src/lib/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl
@@ -1,5 +1,5 @@
-using ..NDTensors: data, inds
 using ITensors: ITensor
+using ..NDTensors: data, inds
 
 # TODO: Delete this, it is a hack to decide
 # if an Index is blocked.
@@ -34,21 +34,35 @@ function to_nameddimsarray(x::DiagTensor)
   return named(DiagonalArray(data(x), size(x)), name.(inds(x)))
 end
 
-using ..NDTensors: BlockSparseTensor
+using ITensors: ITensors, dir, qn
+using ..NDTensors: BlockSparseTensor, array, blockdim, datatype, nblocks, nzblocks
 using ..NDTensors.BlockSparseArrays: BlockSparseArray
+using ..NDTensors.BlockSparseArrays.BlockArrays: BlockArrays, blockedrange
+using ..NDTensors.GradedAxes: dual, gradedrange
+using ..NDTensors.TypeParameterAccessors: set_ndims
 # TODO: Delete once `BlockSparse` is removed.
 function to_nameddimsarray(x::BlockSparseTensor)
-  blockinds = map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds(x))
+  blockinds = map(inds(x)) do i
+    r = gradedrange([qn(i, b) => blockdim(i, b) for b in 1:nblocks(i)])
+    if dir(i) == ITensors.In
+      return dual(r)
+    end
+    return r
+  end
   blocktype = set_ndims(datatype(x), ndims(x))
   # TODO: Make a simpler constructor:
   # BlockSparseArray(blocktype, blockinds)
-  arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(blockinds)
+  arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(undef, blockinds)
   for b in nzblocks(x)
-    arraystorage[BlockArrays.Block(Tuple(b)...)] = x[b]
+    arraystorage[BlockArrays.Block(Int.(Tuple(b))...)] = array(x[b])
   end
   return named(arraystorage, name.(inds(x)))
 end
 
+using ITensors: QN
+using ..NDTensors.GradedAxes: GradedAxes
+GradedAxes.fuse_labels(l1::QN, l2::QN) = l1 + l2
+
 ## TODO: Add this back, define `CombinerArrays` library in NDTensors!
 ## using ..NDTensors: CombinerTensor, CombinerArray, storage
 ## # TODO: Delete when we directly use `CombinerArray` as storage.
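Note on the `GradedAxes` change: the new `blockedunitrange_getindices` method resolves a method ambiguity when slicing a `GradedUnitRangeDual` with a blocked vector of `Block`s, keeping block labels on the output axes. Below is a minimal, untested sketch of the call it enables; it assumes the submodule paths used in the diff (`NDTensors.GradedAxes`, `NDTensors.BlockSparseArrays.BlockArrays`) are importable through `ITensors`, and the variable names are illustrative only:

```julia
using ITensors: QN
using ITensors.NDTensors.GradedAxes: GradedAxes, dual, gradedrange
using ITensors.NDTensors.BlockSparseArrays.BlockArrays: Block, mortar

# A dual graded range, like the axes built for `In`-directed indices
# in the `to_nameddimsarray` change above.
r = dual(gradedrange([QN(0) => 2, QN(1) => 3]))

# An `AbstractBlockVector{<:Block{1}}`, the index type whose dispatch
# was previously ambiguous.
bv = mortar([[Block(1)], [Block(2)]])

# Dispatches to the new method; the output is a flipped block vector
# whose axes retain the block labels.
v = GradedAxes.blockedunitrange_getindices(r, bv)
```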
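Note on the `to_nameddimsarray` change: each index of a QN block-sparse tensor is now converted to a `gradedrange` axis labelled by its QN sectors, with `dual` applied when the arrow direction is `ITensors.In`, and the nonzero blocks are copied into a `BlockSparseArray`. A minimal, untested usage sketch; it assumes `ITensorsNamedDimsArraysExt` is importable as a submodule of `ITensors` (matching the `src/lib/...` layout above), and the index and tensor names are hypothetical:

```julia
using ITensors: Index, QN, dag, random_itensor
using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray

i = Index([QN(0) => 2, QN(1) => 3]; tags="i")
A = random_itensor(i', dag(i))

# The result's axes are graded unit ranges carrying the QN labels;
# the axis coming from `dag(i)` (direction `In`) is the dual range.
nda = to_nameddimsarray(A)
```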
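Note on the `fuse_labels` definition: fusing two QN sector labels is defined as QN addition, which makes graded-axis fusion consistent with quantum number conservation. A small sketch restating what the one-line method in the diff implies (the `ITensors.NDTensors.GradedAxes` import path is an assumption):

```julia
using ITensors: QN
using ITensors.NDTensors.GradedAxes: GradedAxes

# `GradedAxes.fuse_labels(l1::QN, l2::QN) = l1 + l2`, so fusing two
# sectors adds their quantum numbers:
@assert GradedAxes.fuse_labels(QN("Sz", 1), QN("Sz", 2)) == QN("Sz", 3)
```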