diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index 58c258383f..cde3745e2a 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman "]
-version = "0.3.58"
+version = "0.3.59"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl
index 3a815d4ea2..85fcaab49f 100644
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/src/BlockSparseArraysGradedAxesExt.jl
@@ -138,7 +138,7 @@ function Base.show(
   io::IO, mime::MIME"text/plain", a::Adjoint{<:Any,<:BlockSparseMatrix}; kwargs...
 )
   axes_a = axes(a)
-  a_nondual = BlockSparseArray(blocks(a'), dual.(nondual.(axes(a))))'
+  a_nondual = BlockSparseArray(blocks(a'), dual.(nondual.(axes(a'))))'
   return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...)
 end
 
diff --git a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl
index 41f7fc3478..11c40d10dc 100644
--- a/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/ext/BlockSparseArraysGradedAxesExt/test/runtests.jl
@@ -1,5 +1,4 @@
 @eval module $(gensym())
-using Compat: Returns
 using Test: @test, @testset
 using BlockArrays:
   AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize
@@ -287,6 +286,15 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
     @test ax isa typeof(dual(r))
   end
 
+  @test !isdual(axes(a, 1))
+  @test !isdual(axes(a, 2))
+  @test isdual(axes(a', 1))
+  @test isdual(axes(a', 2))
+  @test isdual(axes(b, 1))
+  @test isdual(axes(b, 2))
+  @test isdual(axes(copy(a'), 1))
+  @test isdual(axes(copy(a'), 2))
+
   I = [Block(1)[1:1]]
   @test size(b[I, :]) == (1, 4)
   @test size(b[:, I]) == (4, 1)
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl
index af59ae7f51..d0a1e4cdd7 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl
@@ -16,6 +16,7 @@ include("abstractblocksparsearray/arraylayouts.jl")
 include("abstractblocksparsearray/sparsearrayinterface.jl")
 include("abstractblocksparsearray/broadcast.jl")
 include("abstractblocksparsearray/map.jl")
+include("abstractblocksparsearray/linearalgebra.jl")
 include("blocksparsearray/defaults.jl")
 include("blocksparsearray/blocksparsearray.jl")
 include("BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl")
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl
index 0e9fc68386..9ecbb252fd 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/abstractblocksparsearray.jl
@@ -22,11 +22,16 @@ function blockstype(arraytype::Type{<:AbstractBlockSparseArray{T,N}}) where {T,N}
   return SparseArrayDOK{AbstractArray{T,N},N}
 end
 
-## # Specialized in order to fix ambiguity error with `BlockArrays`.
+# Specialized in order to fix ambiguity error with `BlockArrays`.
 function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, I::Vararg{Int,N}) where {N}
   return blocksparse_getindex(a, I...)
 end
 
+# Specialized in order to fix ambiguity error with `BlockArrays`.
+function Base.getindex(a::AbstractBlockSparseArray{<:Any,0})
+  return blocksparse_getindex(a)
+end
+
 ## # Fix ambiguity error with `BlockArrays`.
 ## function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, I::Block{N}) where {N}
 ##   return ArrayLayouts.layout_getindex(a, I)
@@ -51,6 +56,12 @@ function Base.setindex!(
   return a
 end
 
+# Fix ambiguity error.
+function Base.setindex!(a::AbstractBlockSparseArray{<:Any,0}, value)
+  blocksparse_setindex!(a, value)
+  return a
+end
+
 function Base.setindex!(
   a::AbstractBlockSparseArray{<:Any,N}, value, I::Vararg{Block{1},N}
 ) where {N}
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl
new file mode 100644
index 0000000000..144ea47593
--- /dev/null
+++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/linearalgebra.jl
@@ -0,0 +1,18 @@
+using LinearAlgebra: Adjoint, Transpose
+
+# Like: https://github.com/JuliaLang/julia/blob/v1.11.1/stdlib/LinearAlgebra/src/transpose.jl#L184
+# but also takes the dual of the axes.
+# Fixes an issue raised in:
+# https://github.com/ITensor/ITensors.jl/issues/1336#issuecomment-2353434147
+function Base.copy(a::Adjoint{T,<:AbstractBlockSparseMatrix{T}}) where {T}
+  a_dest = similar(parent(a), axes(a))
+  a_dest .= a
+  return a_dest
+end
+
+# More efficient than the generic `LinearAlgebra` version.
+function Base.copy(a::Transpose{T,<:AbstractBlockSparseMatrix{T}}) where {T}
+  a_dest = similar(parent(a), axes(a))
+  a_dest .= a
+  return a_dest
+end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
index 1970127923..dfb797caaa 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
@@ -92,6 +92,10 @@ function Base.getindex(
 )
   return ArrayLayouts.layout_getindex(a, I...)
 end
+# Fixes ambiguity error.
+function Base.getindex(a::BlockSparseArrayLike{<:Any,0})
+  return ArrayLayouts.layout_getindex(a)
+end
 
 # TODO: Define `blocksparse_isassigned`.
 function Base.isassigned(
@@ -100,6 +104,11 @@ function Base.isassigned(
   return isassigned(blocks(a), Int.(index)...)
 end
 
+# Fix ambiguity error.
+function Base.isassigned(a::BlockSparseArrayLike{<:Any,0})
+  return isassigned(blocks(a))
+end
+
 function Base.isassigned(a::BlockSparseArrayLike{<:Any,N}, index::Block{N}) where {N}
   return isassigned(a, Tuple(index)...)
 end
@@ -211,6 +220,11 @@ function Base.similar(
   return blocksparse_similar(a, elt, axes)
 end
 
+# Fixes ambiguity error.
+function Base.similar(a::BlockSparseArrayLike{<:Any,0}, elt::Type, axes::Tuple{})
+  return blocksparse_similar(a, elt, axes)
+end
+
 # Fixes ambiguity error with `BlockArrays`.
 function Base.similar(
   a::BlockSparseArrayLike,
@@ -259,3 +273,22 @@ function Base.similar(
 )
   return blocksparse_similar(a, elt, axes)
 end
+
+# TODO: Implement this in a more generic way using a smarter `copyto!`,
+# which is ultimately what `Array{T,N}(::AbstractArray{<:Any,N})` calls.
+# These are defined for now to avoid scalar indexing issues when there
+# are blocks on GPU.
+function Base.Array{T,N}(a::BlockSparseArrayLike{<:Any,N}) where {T,N}
+  # First make it dense, then move to CPU.
+  # Directly copying to CPU causes some issues with
+  # scalar indexing on GPU which we have to investigate.
+  a_dest = similartype(blocktype(a), T)(undef, size(a))
+  a_dest .= a
+  return Array{T,N}(a_dest)
+end
+function Base.Array{T}(a::BlockSparseArrayLike) where {T}
+  return Array{T,ndims(a)}(a)
+end
+function Base.Array(a::BlockSparseArrayLike)
+  return Array{eltype(a)}(a)
+end
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl
index 325cc5a7e3..50d52a59ca 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray/blocksparsearray.jl
@@ -72,6 +72,10 @@ function BlockSparseArray{T,N}(axes::Tuple{Vararg{AbstractUnitRange,N}}) where {T,N}
   return BlockSparseArray{T,N,default_arraytype(T, axes)}(axes)
 end
 
+function BlockSparseArray{T,0}(axes::Tuple{}) where {T}
+  return BlockSparseArray{T,0,default_arraytype(T, axes)}(axes)
+end
+
 function BlockSparseArray{T,N}(dims::Tuple{Vararg{Vector{Int},N}}) where {T,N}
   return BlockSparseArray{T,N}(blockedrange.(dims))
 end
@@ -84,6 +88,10 @@ function BlockSparseArray{T}(axes::Tuple{Vararg{AbstractUnitRange}}) where {T}
   return BlockSparseArray{T,length(axes)}(axes)
 end
 
+function BlockSparseArray{T}(axes::Tuple{}) where {T}
+  return BlockSparseArray{T,length(axes)}(axes)
+end
+
 function BlockSparseArray{T}(dims::Vararg{Vector{Int}}) where {T}
   return BlockSparseArray{T}(dims)
 end
@@ -92,6 +100,10 @@ function BlockSparseArray{T}(axes::Vararg{AbstractUnitRange}) where {T}
   return BlockSparseArray{T}(axes)
 end
 
+function BlockSparseArray{T}() where {T}
+  return BlockSparseArray{T}(())
+end
+
 function BlockSparseArray{T,N,A}(
   ::UndefInitializer, dims::Tuple
 ) where {T,N,A<:AbstractArray{T,N}}
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
index d4451334c9..fbb5f220f4 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
@@ -22,6 +22,13 @@ function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where {N}
   return a[findblockindex.(axes(a), I)...]
 end
 
+# Fix ambiguity error.
+function blocksparse_getindex(a::AbstractArray{<:Any,0})
+  # TODO: Use `Block()[]` once https://github.com/JuliaArrays/BlockArrays.jl/issues/430
+  # is fixed.
+  return a[BlockIndex{0,Tuple{},Tuple{}}((), ())]
+end
+
 # a[1:2, 1:2]
 # TODO: This definition means that the result of slicing a block sparse array
 # with a non-blocked unit range is blocked. We may want to change that behavior,
@@ -77,6 +84,14 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::Vararg{Int,N}) where {N}
   return a
 end
 
+# Fix ambiguity error.
+function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value)
+  # TODO: Use `Block()[]` once https://github.com/JuliaArrays/BlockArrays.jl/issues/430
+  # is fixed.
+  a[BlockIndex{0,Tuple{},Tuple{}}((), ())] = value
+  return a
+end
+
 function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N}) where {N}
   i = Int.(Tuple(block(I)))
   a_b = blocks(a)[i...]
@@ -86,6 +101,15 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N}) where {N}
   return a
 end
 
+# Fix ambiguity error.
+function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value, I::BlockIndex{0})
+  a_b = blocks(a)[]
+  a_b[] = value
+  # Set the block, required if it is structurally zero.
+  blocks(a)[] = a_b
+  return a
+end
+
 function blocksparse_fill!(a::AbstractArray, value)
   for b in BlockRange(a)
     # We can't use:
@@ -119,7 +143,8 @@ end
 using ..SparseArrayInterface:
   SparseArrayInterface, AbstractSparseArray, AbstractSparseMatrix
 
-_perm(::PermutedDimsArray{<:Any,<:Any,P}) where {P} = P
+_perm(::PermutedDimsArray{<:Any,<:Any,perm}) where {perm} = perm
+_invperm(::PermutedDimsArray{<:Any,<:Any,<:Any,invperm}) where {invperm} = invperm
 _getindices(t::Tuple, indices) = map(i -> t[i], indices)
 _getindices(i::CartesianIndex, indices) = CartesianIndex(_getindices(Tuple(i), indices))
@@ -140,7 +165,7 @@ function Base.getindex(
   a::SparsePermutedDimsArrayBlocks{<:Any,N}, index::Vararg{Int,N}
 ) where {N}
   return PermutedDimsArray(
-    blocks(parent(a.array))[_getindices(index, _perm(a.array))...], _perm(a.array)
+    blocks(parent(a.array))[_getindices(index, _invperm(a.array))...], _perm(a.array)
   )
 end
 function SparseArrayInterface.stored_indices(a::SparsePermutedDimsArrayBlocks)
diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
index aa55bd6815..57de184afe 100644
--- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
@@ -109,6 +109,37 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
     a[3, 3] = NaN
     @test isnan(norm(a))
+
+    # Empty constructor
+    for a in (dev(BlockSparseArray{elt}()), dev(BlockSparseArray{elt}(undef)))
+      @test size(a) == ()
+      @test isone(length(a))
+      @test blocksize(a) == ()
+      @test blocksizes(a) == fill(())
+      @test iszero(block_nstored(a))
+      @test iszero(@allowscalar(a[]))
+      @test iszero(@allowscalar(a[CartesianIndex()]))
+      @test a[Block()] == dev(fill(0))
+      @test iszero(@allowscalar(a[Block()][]))
+      # Broken:
+      ## @test b[Block()[]] == 2
+      for b in (
+        (b = copy(a); @allowscalar b[] = 2; b),
+        (b = copy(a); @allowscalar b[CartesianIndex()] = 2; b),
+      )
+        @test size(b) == ()
+        @test isone(length(b))
+        @test blocksize(b) == ()
+        @test blocksizes(b) == fill(())
+        @test isone(block_nstored(b))
+        @test @allowscalar(b[]) == 2
+        @test @allowscalar(b[CartesianIndex()]) == 2
+        @test b[Block()] == dev(fill(2))
+        @test @allowscalar(b[Block()][]) == 2
+        # Broken:
+        ## @test b[Block()[]] == 2
+      end
+    end
   end
   @testset "Tensor algebra" begin
     a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4])))
@@ -266,6 +297,15 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
+    a = dev(BlockSparseArray{elt}([1, 1, 1], [1, 2, 3], [2, 2, 1], [1, 2, 1]))
+    a[Block(3, 2, 2, 3)] = dev(randn(elt, 1, 2, 2, 1))
+    perm = (2, 3, 4, 1)
+    for b in (PermutedDimsArray(a, perm), permutedims(a, perm))
+      @test Array(b) == permutedims(Array(a), perm)
+      @test issetequal(block_stored_indices(b), [Block(2, 2, 3, 3)])
+      @test @allowscalar b[Block(2, 2, 3, 3)] == permutedims(a[Block(3, 2, 2, 3)], perm)
+    end
+
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
     @views for b in [Block(1, 2), Block(2, 1)]
       a[b] = randn(elt, size(a[b]))
diff --git a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl
index 1a9a73060c..216c03e7ac 100644
--- a/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl
+++ b/NDTensors/src/lib/SparseArrayInterface/src/sparsearrayinterface/indexing.jl
@@ -82,6 +82,11 @@ function sparse_getindex(a::AbstractArray, I::Vararg{Int})
   return sparse_getindex(a, CartesianIndex(I))
 end
 
+# Fix ambiguity error.
+function sparse_getindex(a::AbstractArray{<:Any,0})
+  return sparse_getindex(a, CartesianIndex())
+end
+
 # Linear indexing
 function sparse_getindex(a::AbstractArray, I::CartesianIndex{1})
   return sparse_getindex(a, CartesianIndices(a)[I])
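A few usage notes on the changes above. First, the graded-axes fixes: `Base.show` for an `Adjoint` now duals the axes of `a'` rather than of `a`, and the new `Base.copy(::Adjoint)` in `linearalgebra.jl` builds its destination with `similar(parent(a), axes(a))`, so the dual axes of the adjoint are preserved rather than dropped. A minimal sketch of the behavior the new `isdual` tests pin down, where `U1` is a hypothetical stand-in for whatever sector type labels the graded axis:

```julia
using NDTensors.BlockSparseArrays: BlockSparseArray
using NDTensors.GradedAxes: gradedrange, isdual

r = gradedrange([U1(0) => 2, U1(1) => 2])  # `U1` is a stand-in sector label
a = BlockSparseArray{Float64}(r, r)

isdual(axes(a, 1))         # false
isdual(axes(a', 1))        # true: taking the adjoint duals the axes
isdual(axes(copy(a'), 1))  # true: `copy(::Adjoint)` now keeps the dual axes
```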
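Second, the zero-dimensional support threaded through `blocksparsearray.jl`, `abstractblocksparsearray.jl`, `blocksparsearrayinterface.jl`, and `indexing.jl`. A zero-dimensional `BlockSparseArray` has a single block addressed by `Block()`, which starts out structurally zero, and `a[]`/`a[] = value` route through the new `blocksparse_getindex(a)` and `blocksparse_setindex!(a, value)` methods. A sketch distilled from the tests added in `test_basics.jl`:

```julia
using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored
using BlockArrays: Block, blocksize

a = BlockSparseArray{Float64}()  # equivalent to BlockSparseArray{Float64}(undef)
size(a)           # ()
blocksize(a)      # ()
block_nstored(a)  # 0: the single block is structurally zero
a[]               # 0.0
a[] = 2.0         # stores the block
a[Block()]        # fill(2.0)
block_nstored(a)  # 1
```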
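Third, the new `Array` conversions in `wrappedabstractblocksparsearray.jl` densify into a single array of the block type (possibly on GPU) via broadcast and only then convert to a CPU `Array`, avoiding scalar indexing of GPU blocks. On the CPU this simply materializes the structural zeros:

```julia
using NDTensors.BlockSparseArrays: BlockSparseArray
using BlockArrays: Block

a = BlockSparseArray{Float64}([2, 3], [2, 3])  # 5×5, blocked (2 + 3) × (2 + 3)
a[Block(1, 1)] = ones(2, 2)
Array(a)  # 5×5 Matrix{Float64}; unstored blocks become explicit zeros
```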
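Finally, the `_perm`/`_invperm` change fixes block lookup through `PermutedDimsArray`. Since `b = PermutedDimsArray(a, perm)` satisfies `b[I...] == a[map(d -> I[d], invperm(perm))...]`, a block index of `b` must be mapped through the inverse permutation before indexing `blocks(parent(b))`; the old code applied `perm` itself, which agrees with `invperm(perm)` only for self-inverse permutations such as a matrix transpose, which is why the bug only surfaced in higher dimensions. Restating the new four-dimensional test:

```julia
using NDTensors.BlockSparseArrays: BlockSparseArray
using BlockArrays: Block

a = BlockSparseArray{Float64}([1, 1, 1], [1, 2, 3], [2, 2, 1], [1, 2, 1])
a[Block(3, 2, 2, 3)] = ones(1, 2, 2, 1)
perm = (2, 3, 4, 1)  # invperm(perm) == (4, 1, 2, 3)
b = PermutedDimsArray(a, perm)
# Block (3, 2, 2, 3) of `a` appears as block (2, 2, 3, 3) of `b`:
b[Block(2, 2, 3, 3)] == permutedims(a[Block(3, 2, 2, 3)], perm)  # true
```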