Skip to content

Commit

Permalink
[BlockSparseArrays] Zero dimensional block sparse array and some fixes for Adjoint and PermutedDimsArray (#1574)
Browse files Browse the repository at this point in the history

* [BlockSparseArrays] Zero dimensional block sparse array and some fixes for Adjoint and PermutedDimsArray

* [NDTensors] Bump to v0.3.59
  • Loading branch information
mtfishman authored Nov 9, 2024
1 parent f9b6309 commit 49c1202
Show file tree
Hide file tree
Showing 11 changed files with 159 additions and 6 deletions.
2 changes: 1 addition & 1 deletion NDTensors/Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <[email protected]>"]
version = "0.3.58"
version = "0.3.59"

[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ function Base.show(
io::IO, mime::MIME"text/plain", a::Adjoint{<:Any,<:BlockSparseMatrix}; kwargs...
)
axes_a = axes(a)
a_nondual = BlockSparseArray(blocks(a'), dual.(nondual.(axes(a))))'
a_nondual = BlockSparseArray(blocks(a'), dual.(nondual.(axes(a'))))'
return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...)
end

Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
@eval module $(gensym())
using Compat: Returns
using Test: @test, @testset
using BlockArrays:
AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize
Expand Down Expand Up @@ -287,6 +286,15 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
@test ax isa typeof(dual(r))
end

@test !isdual(axes(a, 1))
@test !isdual(axes(a, 2))
@test isdual(axes(a', 1))
@test isdual(axes(a', 2))
@test isdual(axes(b, 1))
@test isdual(axes(b, 2))
@test isdual(axes(copy(a'), 1))
@test isdual(axes(copy(a'), 2))

I = [Block(1)[1:1]]
@test size(b[I, :]) == (1, 4)
@test size(b[:, I]) == (4, 1)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ include("abstractblocksparsearray/arraylayouts.jl")
include("abstractblocksparsearray/sparsearrayinterface.jl")
include("abstractblocksparsearray/broadcast.jl")
include("abstractblocksparsearray/map.jl")
include("abstractblocksparsearray/linearalgebra.jl")
include("blocksparsearray/defaults.jl")
include("blocksparsearray/blocksparsearray.jl")
include("BlockArraysSparseArrayInterfaceExt/BlockArraysSparseArrayInterfaceExt.jl")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,16 @@ function blockstype(arraytype::Type{<:AbstractBlockSparseArray{T,N}}) where {T,N
return SparseArrayDOK{AbstractArray{T,N},N}
end

## # Specialized in order to fix ambiguity error with `BlockArrays`.
# Specialized in order to fix ambiguity error with `BlockArrays`.
# Scalar indexing with `N` integer indices; forwards to the block sparse
# implementation. (Specialized to avoid a method ambiguity with `BlockArrays`.)
function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, inds::Vararg{Int,N}) where {N}
  return blocksparse_getindex(a, inds...)
end

# Zero-dimensional scalar indexing `a[]`.
# (Specialized in order to fix an ambiguity error with `BlockArrays`.)
Base.getindex(a::AbstractBlockSparseArray{<:Any,0}) = blocksparse_getindex(a)

## # Fix ambiguity error with `BlockArrays`.
## function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, I::Block{N}) where {N}
## return ArrayLayouts.layout_getindex(a, I)
Expand All @@ -51,6 +56,12 @@ function Base.setindex!(
return a
end

# Zero-dimensional scalar assignment `a[] = value`.
# (Specialized in order to fix a method ambiguity.)
function Base.setindex!(a::AbstractBlockSparseArray{<:Any,0}, value)
  blocksparse_setindex!(a, value)
  # `setindex!` conventionally returns the mutated array.
  return a
end

function Base.setindex!(
a::AbstractBlockSparseArray{<:Any,N}, value, I::Vararg{Block{1},N}
) where {N}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
using LinearAlgebra: Adjoint, Transpose

# Like: https://github.com/JuliaLang/julia/blob/v1.11.1/stdlib/LinearAlgebra/src/transpose.jl#L184
# but also takes the dual of the axes.
# Fixes an issue raised in:
# https://github.com/ITensor/ITensors.jl/issues/1336#issuecomment-2353434147
# Materialize a lazy `Adjoint` of a block sparse matrix into a concrete array.
# `similar(parent(a), axes(a))` builds the destination from the adjoint's axes,
# then a fused broadcast copies the (conjugate-transposed) entries over.
function Base.copy(a::Adjoint{T,<:AbstractBlockSparseMatrix{T}}) where {T}
  dest = similar(parent(a), axes(a))
  return dest .= a
end

# Materialize a lazy `Transpose` of a block sparse matrix.
# More efficient than the generic `LinearAlgebra` fallback.
function Base.copy(a::Transpose{T,<:AbstractBlockSparseMatrix{T}}) where {T}
  dest = similar(parent(a), axes(a))
  return dest .= a
end
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,10 @@ function Base.getindex(
)
return ArrayLayouts.layout_getindex(a, I...)
end
# Zero-dimensional `a[]`; routed through `ArrayLayouts`.
# (Defined to fix a method ambiguity.)
Base.getindex(a::BlockSparseArrayLike{<:Any,0}) = ArrayLayouts.layout_getindex(a)

# TODO: Define `blocksparse_isassigned`.
function Base.isassigned(
Expand All @@ -100,6 +104,11 @@ function Base.isassigned(
return isassigned(blocks(a), Int.(index)...)
end

# Zero-dimensional `isassigned(a)`: defined in terms of the (zero-dimensional)
# container of blocks. (Defined to fix a method ambiguity.)
Base.isassigned(a::BlockSparseArrayLike{<:Any,0}) = isassigned(blocks(a))

# An `N`-dimensional block label is assigned iff the corresponding position is;
# unpack it into `N` one-dimensional `Block`s and forward.
function Base.isassigned(a::BlockSparseArrayLike{<:Any,N}, b::Block{N}) where {N}
  return isassigned(a, Tuple(b)...)
end
Expand Down Expand Up @@ -211,6 +220,11 @@ function Base.similar(
return blocksparse_similar(a, elt, axes)
end

# `similar` with an empty axes tuple (zero-dimensional result).
# (Defined to fix a method ambiguity.)
function Base.similar(a::BlockSparseArrayLike{<:Any,0}, elt::Type, axes::Tuple{})
  return blocksparse_similar(a, elt, axes)
end

# Fixes ambiguity error with `BlockArrays`.
function Base.similar(
a::BlockSparseArrayLike,
Expand Down Expand Up @@ -259,3 +273,22 @@ function Base.similar(
)
return blocksparse_similar(a, elt, axes)
end

# TODO: Implement this in a more generic way using a smarter `copyto!`,
# which is ultimately what `Array{T,N}(::AbstractArray{<:Any,N})` calls.
# These are defined for now to avoid scalar indexing issues when there
# are blocks on GPU.
function Base.Array{T,N}(a::BlockSparseArrayLike{<:Any,N}) where {T,N}
  # Densify into the blocks' native storage type first, then convert to `Array`.
  # Copying straight to CPU hit scalar-indexing issues on GPU (see TODO above).
  dense = similartype(blocktype(a), T)(undef, size(a))
  dense .= a
  return Array{T,N}(dense)
end
# Fill in the dimensionality from the input array.
Base.Array{T}(a::BlockSparseArrayLike) where {T} = Array{T,ndims(a)}(a)
# Fill in the element type from the input array.
Base.Array(a::BlockSparseArrayLike) = Array{eltype(a)}(a)
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,10 @@ function BlockSparseArray{T,N}(axes::Tuple{Vararg{AbstractUnitRange,N}}) where {
return BlockSparseArray{T,N,default_arraytype(T, axes)}(axes)
end

# Zero-dimensional constructor from an empty axes tuple,
# using the default storage type for `T`.
function BlockSparseArray{T,0}(axes::Tuple{}) where {T}
  return BlockSparseArray{T,0,default_arraytype(T, axes)}(axes)
end

function BlockSparseArray{T,N}(dims::Tuple{Vararg{Vector{Int},N}}) where {T,N}
return BlockSparseArray{T,N}(blockedrange.(dims))
end
Expand All @@ -84,6 +88,10 @@ function BlockSparseArray{T}(axes::Tuple{Vararg{AbstractUnitRange}}) where {T}
return BlockSparseArray{T,length(axes)}(axes)
end

# An empty axes tuple always means zero dimensions
# (`length(::Tuple{}) == 0`), so forward to the 0-dimensional constructor.
BlockSparseArray{T}(axes::Tuple{}) where {T} = BlockSparseArray{T,0}(axes)

# Splatted block-length vectors; collect them into a tuple and forward.
function BlockSparseArray{T}(blocklengths::Vararg{Vector{Int}}) where {T}
  return BlockSparseArray{T}(blocklengths)
end
Expand All @@ -92,6 +100,10 @@ function BlockSparseArray{T}(axes::Vararg{AbstractUnitRange}) where {T}
return BlockSparseArray{T}(axes)
end

# No arguments: construct a zero-dimensional block sparse array
# (empty axes tuple).
BlockSparseArray{T}() where {T} = BlockSparseArray{T}(())

function BlockSparseArray{T,N,A}(
::UndefInitializer, dims::Tuple
) where {T,N,A<:AbstractArray{T,N}}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,13 @@ function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where
return a[findblockindex.(axes(a), I)...]
end

# Zero-dimensional scalar read `a[]`. (Defined to fix a method ambiguity.)
function blocksparse_getindex(a::AbstractArray{<:Any,0})
  # TODO: Use `Block()[]` once
  # https://github.com/JuliaArrays/BlockArrays.jl/issues/430 is fixed.
  zerodim_index = BlockIndex{0,Tuple{},Tuple{}}((), ())
  return a[zerodim_index]
end

# a[1:2, 1:2]
# TODO: This definition means that the result of slicing a block sparse array
# with a non-blocked unit range is blocked. We may want to change that behavior,
Expand Down Expand Up @@ -77,6 +84,14 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::Vararg{Int,N
return a
end

# Zero-dimensional scalar write `a[] = value`. (Defined to fix a method ambiguity.)
function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value)
  # TODO: Use `Block()[]` once
  # https://github.com/JuliaArrays/BlockArrays.jl/issues/430 is fixed.
  zerodim_index = BlockIndex{0,Tuple{},Tuple{}}((), ())
  a[zerodim_index] = value
  return a
end

function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N}) where {N}
i = Int.(Tuple(block(I)))
a_b = blocks(a)[i...]
Expand All @@ -86,6 +101,15 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N
return a
end

# Write through a zero-dimensional `BlockIndex`. (Defined to fix a method ambiguity.)
function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value, I::BlockIndex{0})
  blk = blocks(a)[]
  blk[] = value
  # Write the block back into the sparse block storage; this is required
  # when the block was structurally zero.
  blocks(a)[] = blk
  return a
end

function blocksparse_fill!(a::AbstractArray, value)
for b in BlockRange(a)
# We can't use:
Expand Down Expand Up @@ -119,7 +143,8 @@ end
using ..SparseArrayInterface:
SparseArrayInterface, AbstractSparseArray, AbstractSparseMatrix

_perm(::PermutedDimsArray{<:Any,<:Any,P}) where {P} = P
# Read the forward and inverse permutations out of a `PermutedDimsArray`'s
# type parameters (third and fourth parameters, respectively).
_perm(::PermutedDimsArray{<:Any,<:Any,P}) where {P} = P
_invperm(::PermutedDimsArray{<:Any,<:Any,<:Any,IP}) where {IP} = IP
# Select the entries of `t` at positions `indices`, preserving their order.
_getindices(t::Tuple, indices) = map(Base.Fix1(getindex, t), indices)
_getindices(i::CartesianIndex, indices) = CartesianIndex(_getindices(Tuple(i), indices))

Expand All @@ -140,7 +165,7 @@ function Base.getindex(
a::SparsePermutedDimsArrayBlocks{<:Any,N}, index::Vararg{Int,N}
) where {N}
return PermutedDimsArray(
blocks(parent(a.array))[_getindices(index, _perm(a.array))...], _perm(a.array)
blocks(parent(a.array))[_getindices(index, _invperm(a.array))...], _perm(a.array)
)
end
function SparseArrayInterface.stored_indices(a::SparsePermutedDimsArrayBlocks)
Expand Down
40 changes: 40 additions & 0 deletions NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,37 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype

a[3, 3] = NaN
@test isnan(norm(a))

# Empty constructor
for a in (dev(BlockSparseArray{elt}()), dev(BlockSparseArray{elt}(undef)))
@test size(a) == ()
@test isone(length(a))
@test blocksize(a) == ()
@test blocksizes(a) == fill(())
@test iszero(block_nstored(a))
@test iszero(@allowscalar(a[]))
@test iszero(@allowscalar(a[CartesianIndex()]))
@test a[Block()] == dev(fill(0))
@test iszero(@allowscalar(a[Block()][]))
# Broken:
## @test b[Block()[]] == 2
for b in (
(b = copy(a); @allowscalar b[] = 2; b),
(b = copy(a); @allowscalar b[CartesianIndex()] = 2; b),
)
@test size(b) == ()
@test isone(length(b))
@test blocksize(b) == ()
@test blocksizes(b) == fill(())
@test isone(block_nstored(b))
@test @allowscalar(b[]) == 2
@test @allowscalar(b[CartesianIndex()]) == 2
@test b[Block()] == dev(fill(2))
@test @allowscalar(b[Block()][]) == 2
# Broken:
## @test b[Block()[]] == 2
end
end
end
@testset "Tensor algebra" begin
a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4])))
Expand Down Expand Up @@ -266,6 +297,15 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
@test block_nstored(b) == 2
@test nstored(b) == 2 * 4 + 3 * 3

a = dev(BlockSparseArray{elt}([1, 1, 1], [1, 2, 3], [2, 2, 1], [1, 2, 1]))
a[Block(3, 2, 2, 3)] = dev(randn(elt, 1, 2, 2, 1))
perm = (2, 3, 4, 1)
for b in (PermutedDimsArray(a, perm), permutedims(a, perm))
@test Array(b) == permutedims(Array(a), perm)
@test issetequal(block_stored_indices(b), [Block(2, 2, 3, 3)])
@test @allowscalar b[Block(2, 2, 3, 3)] == permutedims(a[Block(3, 2, 2, 3)], perm)
end

a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
@views for b in [Block(1, 2), Block(2, 1)]
a[b] = randn(elt, size(a[b]))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,11 @@ function sparse_getindex(a::AbstractArray, I::Vararg{Int})
return sparse_getindex(a, CartesianIndex(I))
end

# Zero-dimensional read: forward to the empty `CartesianIndex` method.
# (Defined to fix a method ambiguity.)
sparse_getindex(a::AbstractArray{<:Any,0}) = sparse_getindex(a, CartesianIndex())

# Linear indexing
function sparse_getindex(a::AbstractArray, I::CartesianIndex{1})
return sparse_getindex(a, CartesianIndices(a)[I])
Expand Down

2 comments on commit 49c1202

@mtfishman
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@JuliaRegistrator register subdir=NDTensors

@JuliaRegistrator
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Registration pull request created: JuliaRegistries/General/119071

Tip: Release Notes

Did you know you can add release notes too? Just add markdown formatted text underneath the comment after the text
"Release notes:" and it will be added to the registry PR, and if TagBot is installed it will also be added to the
release that TagBot creates. i.e.

@JuliaRegistrator register

Release notes:

## Breaking changes

- blah

To add them here just re-invoke and the PR will be updated.

Tagging

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the github interface, or via:

git tag -a NDTensors-v0.3.59 -m "<description of version>" 49c12021739e193c56abef17711d29c2e41e4cdd
git push origin NDTensors-v0.3.59

Please sign in to comment.