From 8a85fbebf39cc03f068eda558db1d563095910c8 Mon Sep 17 00:00:00 2001 From: Daniel Karrasch Date: Mon, 21 Oct 2024 15:56:40 +0200 Subject: [PATCH 1/9] Inline sparse-times-dense in-place multiplication --- src/linalg.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/linalg.jl b/src/linalg.jl index ca70ac4c..c4d03f97 100644 --- a/src/linalg.jl +++ b/src/linalg.jl @@ -47,11 +47,11 @@ for op ∈ (:+, :-) end end -generic_matmatmul!(C::StridedMatrix, tA, tB, A::SparseMatrixCSCUnion2, B::DenseMatrixUnion, _add::MulAddMul) = +@inline generic_matmatmul!(C::StridedMatrix, tA, tB, A::SparseMatrixCSCUnion2, B::DenseMatrixUnion, _add::MulAddMul) = spdensemul!(C, tA, tB, A, B, _add) -generic_matmatmul!(C::StridedMatrix, tA, tB, A::SparseMatrixCSCUnion2, B::AbstractTriangular, _add::MulAddMul) = +@inline generic_matmatmul!(C::StridedMatrix, tA, tB, A::SparseMatrixCSCUnion2, B::AbstractTriangular, _add::MulAddMul) = spdensemul!(C, tA, tB, A, B, _add) -generic_matvecmul!(C::StridedVecOrMat, tA, A::SparseMatrixCSCUnion2, B::DenseInputVector, _add::MulAddMul) = +@inline generic_matvecmul!(C::StridedVecOrMat, tA, A::SparseMatrixCSCUnion2, B::DenseInputVector, _add::MulAddMul) = spdensemul!(C, tA, 'N', A, B, _add) Base.@constprop :aggressive function spdensemul!(C, tA, tB, A, B, _add) From aa66624392ee93246ed889ca1ce824b4ac48ab53 Mon Sep 17 00:00:00 2001 From: CyHan Date: Sat, 26 Oct 2024 22:29:08 +0800 Subject: [PATCH 2/9] doc: move solvers doc to `src\solvers.md` (#576) --- docs/src/index.md | 5 +++++ docs/src/solvers.md | 27 +++++++++++++++------------ 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index 3868d041..770301ab 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -206,6 +206,11 @@ section of the standard library reference. | [`sprandn(m,n,d)`](@ref) | [`randn(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the standard normal (Gaussian) distribution. | | [`sprandn(rng,m,n,d)`](@ref) | [`randn(rng,m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements generated with the `rng` random number generator | + +```@meta +DocTestSetup = nothing +``` + # [SparseArrays API](@id stdlib-sparse-arrays) ```@docs diff --git a/docs/src/solvers.md b/docs/src/solvers.md index e633be9d..a5f2aeab 100644 --- a/docs/src/solvers.md +++ b/docs/src/solvers.md @@ -4,26 +4,29 @@ DocTestSetup = :(using LinearAlgebra, SparseArrays) ``` -Sparse matrix solvers call functions from [SuiteSparse](http://suitesparse.com). The following factorizations are available: +## [Sparse Linear Algebra](@id stdlib-sparse-linalg) -| Type | Description | -|:--------------------------------- |:--------------------------------------------- | -| `CHOLMOD.Factor` | Cholesky factorization | -| `UMFPACK.UmfpackLU` | LU factorization | -| `SPQR.QRSparse` | QR factorization | - -Other solvers such as [Pardiso.jl](https://github.com/JuliaSparse/Pardiso.jl/) are available as external packages. [Arpack.jl](https://julialinearalgebra.github.io/Arpack.jl/stable/) provides `eigs` and `svds` for iterative solution of eigensystems and singular value decompositions. +Sparse matrix solvers call functions from [SuiteSparse](http://suitesparse.com). -These factorizations are described in more detail in the -[`Linear Algebra`](https://docs.julialang.org/en/v1/stdlib/LinearAlgebra/) -section of the manual: +The following factorizations are available: 1. 
[`cholesky`](@ref SparseArrays.CHOLMOD.cholesky) 2. [`ldlt`](@ref SparseArrays.CHOLMOD.ldlt) 3. [`lu`](@ref SparseArrays.UMFPACK.lu) 4. [`qr`](@ref SparseArrays.SPQR.qr) -```@docs +| Type | Description | +|:--------------------------------- |:--------------------------------------------- | +| `CHOLMOD.Factor` | Cholesky factorization | +| `UMFPACK.UmfpackLU` | LU factorization | +| `SPQR.QRSparse` | QR factorization | + +Other solvers such as [Pardiso.jl](https://github.com/JuliaSparse/Pardiso.jl/) are available +as external packages. [Arpack.jl](https://julialinearalgebra.github.io/Arpack.jl/stable/) +provides `eigs` and `svds` for iterative solution of eigensystems and singular value +decompositions. + +```@docs; canonical=false SparseArrays.CHOLMOD.cholesky SparseArrays.CHOLMOD.cholesky! SparseArrays.CHOLMOD.ldlt From 8dd830082fa807c74a2c9412e1f8e648282ea6fe Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 4 Apr 2024 08:24:39 +0530 Subject: [PATCH 3/9] SparseMatrixCSC constructor with a Tuple of Integers (#523) * SparseMatrixCSC constructor with a Tuple of Integers * SparseVector constructors --------- Co-authored-by: Viral B. Shah --- src/sparsematrix.jl | 2 ++ src/sparsevector.jl | 1 + test/sparsematrix_constructors_indexing.jl | 11 +++++++---- test/sparsevector.jl | 12 ++++++++---- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/sparsematrix.jl b/src/sparsematrix.jl index 1559cd86..b9637eda 100644 --- a/src/sparsematrix.jl +++ b/src/sparsematrix.jl @@ -49,10 +49,12 @@ SparseMatrixCSC(m, n, colptr::ReadOnly, rowval::ReadOnly, nzval::Vector) = """ SparseMatrixCSC{Tv,Ti}(::UndefInitializer, m::Integer, n::Integer) + SparseMatrixCSC{Tv,Ti}(::UndefInitializer, (m,n)::NTuple{2,Integer}) Creates an empty sparse matrix with element type `Tv` and integer type `Ti` of size `m × n`. """ SparseMatrixCSC{Tv,Ti}(::UndefInitializer, m::Integer, n::Integer) where {Tv, Ti} = spzeros(Tv, Ti, m, n) +SparseMatrixCSC{Tv,Ti}(::UndefInitializer, mn::NTuple{2,Integer}) where {Tv, Ti} = spzeros(Tv, Ti, mn...) 
""" FixedSparseCSC{Tv,Ti<:Integer} <: AbstractSparseMatrixCSC{Tv,Ti} diff --git a/src/sparsevector.jl b/src/sparsevector.jl index 206a1e06..41498b50 100644 --- a/src/sparsevector.jl +++ b/src/sparsevector.jl @@ -50,6 +50,7 @@ SparseVector(n::Integer, nzind::Vector{Ti}, nzval::Vector{Tv}) where {Tv,Ti} = SparseVector{Tv,Ti}(n, nzind, nzval) SparseVector{Tv, Ti}(::UndefInitializer, n::Integer) where {Tv, Ti} = SparseVector{Tv, Ti}(n, Ti[], Tv[]) +SparseVector{Tv, Ti}(::UndefInitializer, (n,)::Tuple{Integer}) where {Tv, Ti} = SparseVector{Tv, Ti}(n, Ti[], Tv[]) """ FixedSparseVector{Tv,Ti<:Integer} <: AbstractCompressedVector{Tv,Ti} diff --git a/test/sparsematrix_constructors_indexing.jl b/test/sparsematrix_constructors_indexing.jl index 5824d887..b1b08dbf 100644 --- a/test/sparsematrix_constructors_indexing.jl +++ b/test/sparsematrix_constructors_indexing.jl @@ -57,10 +57,13 @@ end @test sparse([1, 1, 2, 2, 2], [1, 2, 1, 2, 2], -1.0, 2, 2, *) == sparse([1, 1, 2, 2], [1, 2, 1, 2], [-1.0, -1.0, -1.0, 1.0], 2, 2) @test sparse(sparse(Int32.(1:5), Int32.(1:5), trues(5))') isa SparseMatrixCSC{Bool,Int32} # undef initializer - m = SparseMatrixCSC{Float32, Int16}(undef, 3, 4) - @test size(m) == (3, 4) - @test eltype(m) === Float32 - @test m == spzeros(3, 4) + sz = (3, 4) + for m in (SparseMatrixCSC{Float32, Int16}(undef, sz...), SparseMatrixCSC{Float32, Int16}(undef, sz), + similar(SparseMatrixCSC{Float32, Int16}, sz)) + @test size(m) == sz + @test eltype(m) === Float32 + @test m == spzeros(sz...) + end end @testset "spzeros for pattern creation (structural zeros)" begin diff --git a/test/sparsevector.jl b/test/sparsevector.jl index 513028e5..15499e12 100644 --- a/test/sparsevector.jl +++ b/test/sparsevector.jl @@ -219,10 +219,14 @@ end end @testset "Undef initializer" begin - v = SparseVector{Float32, Int16}(undef, 4) - @test size(v) == (4, ) - @test eltype(v) === Float32 - @test v == spzeros(Float32, 4) + sz = (4,) + for v in (SparseVector{Float32, Int16}(undef, sz), + SparseVector{Float32, Int16}(undef, sz...), + similar(SparseVector{Float32, Int16}, sz)) + @test size(v) == sz + @test eltype(v) === Float32 + @test v == spzeros(Float32, sz...) + end end end ### Element access From 8a48327681c3cf49c916ae289c4c4dd821f9d541 Mon Sep 17 00:00:00 2001 From: inky Date: Sat, 25 May 2024 12:30:46 -0500 Subject: [PATCH 4/9] test: Don't use GPL module when Base.USE_GPL_LIBS=false (#535) --- test/cholmod.jl | 8 +++++--- test/linalg_solvers.jl | 8 +++++--- test/spqr.jl | 9 ++++++--- test/umfpack.jl | 8 ++++++-- 4 files changed, 22 insertions(+), 11 deletions(-) diff --git a/test/cholmod.jl b/test/cholmod.jl index e025119a..108b0ff8 100644 --- a/test/cholmod.jl +++ b/test/cholmod.jl @@ -1,8 +1,12 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license module CHOLMODTests - using Test + +@static if !Base.USE_GPL_LIBS + @info "This Julia build excludes the use of SuiteSparse GPL libraries. Skipping CHOLMOD tests" +else + using SparseArrays.CHOLMOD using SparseArrays.CHOLMOD: getcommon using Random @@ -16,8 +20,6 @@ using SparseArrays: getcolptr using SparseArrays.LibSuiteSparse using SparseArrays.LibSuiteSparse: cholmod_l_allocate_sparse, cholmod_allocate_sparse -if Base.USE_GPL_LIBS - # CHOLMOD tests itypes = sizeof(Int) == 4 ? 
(Int32,) : (Int32, Int64) for Ti ∈ itypes, Tv ∈ (Float32, Float64) diff --git a/test/linalg_solvers.jl b/test/linalg_solvers.jl index f8a758c7..b59659b4 100644 --- a/test/linalg_solvers.jl +++ b/test/linalg_solvers.jl @@ -1,14 +1,16 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license module SparseLinalgSolversTests - using Test + +@static if !Base.USE_GPL_LIBS + @info "This Julia build excludes the use of SuiteSparse GPL libraries. Skipping SparseLinalgSolvers Tests" +else + using SparseArrays using Random using LinearAlgebra -if Base.USE_GPL_LIBS - @testset "explicit zeros" begin a = SparseMatrixCSC(2, 2, [1, 3, 5], [1, 2, 1, 2], [1.0, 0.0, 0.0, 1.0]) @test lu(a)\[2.0, 3.0] ≈ [2.0, 3.0] diff --git a/test/spqr.jl b/test/spqr.jl index 16a72eef..b3d895c3 100644 --- a/test/spqr.jl +++ b/test/spqr.jl @@ -1,16 +1,19 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license module SPQRTests - using Test + +@static if !Base.USE_GPL_LIBS + @info "This Julia build excludes the use of SuiteSparse GPL libraries. Skipping SPQR Tests" +else + using SparseArrays.SPQR using SparseArrays.CHOLMOD using LinearAlgebra: I, istriu, norm, qr, rank, rmul!, lmul!, Adjoint, Transpose, ColumnNorm, RowMaximum, NoPivot using SparseArrays: SparseArrays, sparse, sprandn, spzeros, SparseMatrixCSC using Random: seed! -# TODO REMOVE SECOND PREDICATE WITH SS7.1 -if Base.USE_GPL_LIBS + @testset "Sparse QR" begin m, n = 100, 10 nn = 100 diff --git a/test/umfpack.jl b/test/umfpack.jl index ba65e1b2..b4bdd588 100644 --- a/test/umfpack.jl +++ b/test/umfpack.jl @@ -1,15 +1,19 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license module UMFPACKTests - using Test + +@static if !Base.USE_GPL_LIBS + @info "This Julia build excludes the use of SuiteSparse GPL libraries. Skipping UMFPACK Tests" +else + using Random using SparseArrays using Serialization using LinearAlgebra: LinearAlgebra, I, det, issuccess, ldiv!, lu, lu!, Transpose, SingularException, Diagonal, logabsdet using SparseArrays: nnz, sparse, sprand, sprandn, SparseMatrixCSC, UMFPACK, increment! -if Base.USE_GPL_LIBS + function umfpack_report(l::UMFPACK.UmfpackLU) UMFPACK.umfpack_report_numeric(l, 0) UMFPACK.umfpack_report_symbolic(l, 0) From 66d65fdbaf2e8aebaf5c35f1883bfbdbb8deb02f Mon Sep 17 00:00:00 2001 From: Ben Corbett <32752943+corbett5@users.noreply.github.com> Date: Thu, 22 Aug 2024 11:40:53 -0600 Subject: [PATCH 5/9] Change default QR tolerance to match SPQR (#557) SPQR uses just the double precision epsilon even for Float32. https://github.com/DrTimothyAldenDavis/SuiteSparse/blob/131471310ef0600b231b8fa7c10a55c3f70afbd9/SPQR/Source/spqr_tol.cpp#L29C6-L30C57 --- src/solvers/spqr.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/solvers/spqr.jl b/src/solvers/spqr.jl index f7c8189a..1654646e 100644 --- a/src/solvers/spqr.jl +++ b/src/solvers/spqr.jl @@ -146,7 +146,7 @@ Matrix{T}(Q::QRSparseQ) where {T} = lmul!(Q, Matrix{T}(I, size(Q, 1), min(size(Q # From SPQR manual p. 
6 _default_tol(A::AbstractSparseMatrixCSC) = - 20*sum(size(A))*eps(real(eltype(A)))*maximum(norm(view(A, :, i)) for i in 1:size(A, 2)) + 20*sum(size(A))*eps()*maximum(norm(view(A, :, i)) for i in 1:size(A, 2)) """ qr(A::SparseMatrixCSC; tol=_default_tol(A), ordering=ORDERING_DEFAULT) -> QRSparse From 853435704212a63997360eda1ae59fb71f015738 Mon Sep 17 00:00:00 2001 From: Daniel Karrasch Date: Sat, 2 Nov 2024 10:59:14 +0100 Subject: [PATCH 6/9] Update ci.yml: run CI and docs on v1.11 --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 17f6aeca..80b3269b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: fail-fast: false matrix: version: - - 'nightly' + - '1.11' os: - ubuntu-latest - macOS-latest @@ -68,7 +68,7 @@ jobs: - uses: julia-actions/setup-julia@latest with: # version: '1.6' - version: 'nightly' + version: '1.11' - name: Generate docs run: | julia --color=yes -e 'write("Project.toml", replace(read("Project.toml", String), r"uuid = .*?\n" =>"uuid = \"3f01184e-e22b-5df5-ae63-d93ebab69eaf\"\n"));' From fcc610267f2ea8f707ea4effb90ed96be3a22739 Mon Sep 17 00:00:00 2001 From: "Viral B. Shah" Date: Sun, 4 Aug 2024 14:21:13 +0530 Subject: [PATCH 7/9] Do not use nested dissection by default. (#550) * Do not use nested dissection by default. Provide a named parameter `nested_dissection` to `symbolic()` to turn it on. Co-authored-by: Kristoffer Carlsson * Merge Sparse Linear Algebra docs into SparseArrays so that it shows in the Julia manual Consolidate all external packages in one place --------- Co-authored-by: Kristoffer Carlsson --- docs/make.jl | 1 - docs/src/index.md | 48 +++++++++++++++++++++++++++++++++ docs/src/solvers.md | 47 --------------------------------- src/solvers/cholmod.jl | 60 ++++++++++++++++++++++++------------------ 4 files changed, 82 insertions(+), 74 deletions(-) delete mode 100644 docs/src/solvers.md diff --git a/docs/make.jl b/docs/make.jl index 53843712..df9e506c 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,7 +8,6 @@ makedocs( sitename = "SparseArrays", pages = Any[ "SparseArrays" => "index.md", - "Sparse Linear Algebra" => "solvers.md", ]; warnonly = [:missing_docs, :cross_references], ) diff --git a/docs/src/index.md b/docs/src/index.md index 770301ab..dcfc729f 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -206,6 +206,22 @@ section of the standard library reference. | [`sprandn(m,n,d)`](@ref) | [`randn(m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements distributed according to the standard normal (Gaussian) distribution. | | [`sprandn(rng,m,n,d)`](@ref) | [`randn(rng,m,n)`](@ref) | Creates a *m*-by-*n* random matrix (of density *d*) with iid non-zero elements generated with the `rng` random number generator | +## [Sparse Linear Algebra](@id stdlib-sparse-linalg) + +Sparse matrix solvers call functions from [SuiteSparse](http://suitesparse.com). The following factorizations are available: + +| Type | Description | +|:----------------------|:--------------------------------------------- | +| `CHOLMOD.Factor` | Cholesky and LDLt factorizations | +| `UMFPACK.UmfpackLU` | LU factorization | +| `SPQR.QRSparse` | QR factorization | + +These factorizations are described in more detail in the [Sparse Linear Algebra API section](@ref stdlib-sparse-linalg-api): + +1. [`cholesky`](@ref SparseArrays.CHOLMOD.cholesky) +2. [`ldlt`](@ref SparseArrays.CHOLMOD.ldlt) +3. 
[`lu`](@ref SparseArrays.UMFPACK.lu) +4. [`qr`](@ref SparseArrays.SPQR.qr) ```@meta DocTestSetup = nothing @@ -250,6 +266,26 @@ SparseArrays.ftranspose! ```@meta DocTestSetup = nothing ``` + +# [Sparse Linear Algebra API](@id stdlib-sparse-linalg-api) + +```@docs +SparseArrays.CHOLMOD.cholesky +SparseArrays.CHOLMOD.cholesky! +SparseArrays.CHOLMOD.lowrankupdate +SparseArrays.CHOLMOD.lowrankupdate! +SparseArrays.CHOLMOD.lowrankdowndate +SparseArrays.CHOLMOD.lowrankdowndate! +SparseArrays.CHOLMOD.lowrankupdowndate! +SparseArrays.CHOLMOD.ldlt +SparseArrays.UMFPACK.lu +SparseArrays.SPQR.qr +``` + +```@meta +DocTestSetup = nothing +``` + # Noteworthy External Sparse Packages Several other Julia packages provide sparse matrix implementations that should be mentioned: @@ -269,3 +305,15 @@ Several other Julia packages provide sparse matrix implementations that should b 7. [ExtendableSparse.jl](https://github.com/j-fu/ExtendableSparse.jl) enables fast insertion into sparse matrices using a lazy approach to new stored indices. 8. [Finch.jl](https://github.com/willow-ahrens/Finch.jl) supports extensive multidimensional sparse array formats and operations through a mini tensor language and compiler, all in native Julia. Support for COO, CSF, CSR, CSC and more, as well as operations like broadcast, reduce, etc. and custom operations. + +External packages providing sparse direct solvers: +1. [KLU.jl](https://github.com/JuliaSparse/KLU.jl) +2. [Pardiso.jl](https://github.com/JuliaSparse/Pardiso.jl/) + +External packages providing solvers for iterative solution of eigensystems and singular value decompositions: +1. [ArnoldiMethods.jl](https://github.com/JuliaLinearAlgebra/ArnoldiMethod.jl) +2. [KrylovKit](https://github.com/Jutho/KrylovKit.jl) +3. [Arpack.jl](https://github.com/JuliaLinearAlgebra/Arpack.jl) + +External packages for working with graphs: +1. [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl) diff --git a/docs/src/solvers.md b/docs/src/solvers.md deleted file mode 100644 index a5f2aeab..00000000 --- a/docs/src/solvers.md +++ /dev/null @@ -1,47 +0,0 @@ -# Sparse Linear Algebra - -```@meta -DocTestSetup = :(using LinearAlgebra, SparseArrays) -``` - -## [Sparse Linear Algebra](@id stdlib-sparse-linalg) - -Sparse matrix solvers call functions from [SuiteSparse](http://suitesparse.com). - -The following factorizations are available: - -1. [`cholesky`](@ref SparseArrays.CHOLMOD.cholesky) -2. [`ldlt`](@ref SparseArrays.CHOLMOD.ldlt) -3. [`lu`](@ref SparseArrays.UMFPACK.lu) -4. [`qr`](@ref SparseArrays.SPQR.qr) - -| Type | Description | -|:--------------------------------- |:--------------------------------------------- | -| `CHOLMOD.Factor` | Cholesky factorization | -| `UMFPACK.UmfpackLU` | LU factorization | -| `SPQR.QRSparse` | QR factorization | - -Other solvers such as [Pardiso.jl](https://github.com/JuliaSparse/Pardiso.jl/) are available -as external packages. [Arpack.jl](https://julialinearalgebra.github.io/Arpack.jl/stable/) -provides `eigs` and `svds` for iterative solution of eigensystems and singular value -decompositions. - -```@docs; canonical=false -SparseArrays.CHOLMOD.cholesky -SparseArrays.CHOLMOD.cholesky! -SparseArrays.CHOLMOD.ldlt -SparseArrays.SPQR.qr -SparseArrays.UMFPACK.lu -``` - -```@docs -SparseArrays.CHOLMOD.lowrankupdate -SparseArrays.CHOLMOD.lowrankupdate! -SparseArrays.CHOLMOD.lowrankdowndate -SparseArrays.CHOLMOD.lowrankdowndate! -SparseArrays.CHOLMOD.lowrankupdowndate! 
-``` - -```@meta -DocTestSetup = nothing -``` diff --git a/src/solvers/cholmod.jl b/src/solvers/cholmod.jl index 117d801b..e155e72f 100644 --- a/src/solvers/cholmod.jl +++ b/src/solvers/cholmod.jl @@ -795,7 +795,7 @@ function ssmult(A::Sparse{Tv1, Ti1}, B::Sparse{Tv2, Ti2}, stype::Integer, A, B = convert.(Sparse{promote_type(Tv1, Tv2), promote_type(Ti1, Ti2)}, (A, B)) return ssmult(A, B, stype, values, sorted) end -function horzcat(A::Sparse{Tv1, Ti1}, B::Sparse{Tv2, Ti2}, values::Bool) where +function horzcat(A::Sparse{Tv1, Ti1}, B::Sparse{Tv2, Ti2}, values::Bool) where {Tv1<:VRealTypes, Tv2<:VRealTypes, Ti1, Ti2} A, B = convert.(Sparse{promote_type(Tv1, Tv2), promote_type(Ti1, Ti2)}, (A, B)) return horzcat(A, B, values) @@ -809,7 +809,7 @@ function sdmult!(A::Sparse{Tv1, Ti}, transpose::Bool, A, X = convert(Sparse{Tv3, Ti}, A), convert(Dense{Tv3}, X) return sdmult!(A, transpose, α, β, X, Y) end -function vertcat(A::Sparse{Tv1, Ti1}, B::Sparse{Tv2, Ti2}, values::Bool) where +function vertcat(A::Sparse{Tv1, Ti1}, B::Sparse{Tv2, Ti2}, values::Bool) where {Tv1<:VRealTypes, Ti1, Tv2<:VRealTypes, Ti2} A, B = convert.(Sparse{promote_type(Tv1, Tv2), promote_type(Ti1, Ti2)}, (A, B)) return vertcat(A, B, values) @@ -895,7 +895,7 @@ function Dense(A::StridedVecOrMatInclAdjAndTrans) return Dense{T}(A) end # Don't always promote to Float64 now that we have Float32 support. -Dense(A::StridedVecOrMatInclAdjAndTrans{T}) where +Dense(A::StridedVecOrMatInclAdjAndTrans{T}) where {T<:Union{Float16, ComplexF16, Float32, ComplexF32}} = Dense{promote_type(T, Float32)}(A) @@ -1055,8 +1055,8 @@ Sparse(A::Hermitian{Tv, SparseMatrixCSC{Tv,Ti}}) where {Tv, Ti} = Sparse{promote_type(Tv, Float64), Ti <: ITypes ? Ti : promote_type(Ti, Int)}( A.data, A.uplo == 'L' ? -1 : 1 ) -Sparse(A::Hermitian{Tv, SparseMatrixCSC{Tv,Ti}}) where - {Tv<:Union{Float16, Float32, ComplexF32, ComplexF16}, Ti} = +Sparse(A::Hermitian{Tv, SparseMatrixCSC{Tv,Ti}}) where + {Tv<:Union{Float16, Float32, ComplexF32, ComplexF16}, Ti} = Sparse{promote_type(Float32, Tv), Ti <: ITypes ? Ti : promote_type(Ti, Int)}( A.data, A.uplo == 'L' ? 
-1 : 1 ) @@ -1076,7 +1076,7 @@ function Base.convert(::Type{Sparse{Tnew, Inew}}, A::Sparse{Tv, Ti}) where {Tnew a = unsafe_load(typedpointer(A)) S = allocate_sparse(a.nrow, a.ncol, a.nzmax, Bool(a.sorted), Bool(a.packed), a.stype, Tnew, Inew) s = unsafe_load(typedpointer(S)) - + ap = unsafe_wrap(Array, a.p, (a.ncol + 1,), own = false) sp = unsafe_wrap(Array, s.p, (s.ncol + 1,), own = false) copyto!(sp, ap) @@ -1376,7 +1376,7 @@ end ## Multiplication (*)(A::Sparse, B::Sparse) = ssmult(A, B, 0, true, true) -(*)(A::Sparse, B::Dense) = sdmult!(A, false, 1., 0., B, +(*)(A::Sparse, B::Dense) = sdmult!(A, false, 1., 0., B, zeros(size(A, 1), size(B, 2), promote_type(eltype(A), eltype(B))) ) (*)(A::Sparse, B::VecOrMat) = (*)(A, Dense(B)) @@ -1413,7 +1413,7 @@ function *(adjA::Adjoint{<:Any,<:Sparse}, B::Sparse) end *(adjA::Adjoint{<:Any,<:Sparse}, B::Dense) = ( - A = parent(adjA); sdmult!(A, true, 1., 0., B, + A = parent(adjA); sdmult!(A, true, 1., 0., B, zeros(size(A, 2), size(B, 2), promote_type(eltype(A), eltype(B)))) ) *(adjA::Adjoint{<:Any,<:Sparse}, B::VecOrMat) = adjA * Dense(B) @@ -1423,25 +1423,33 @@ end ## Compute that symbolic factorization only function symbolic(A::Sparse{<:VTypes, Ti}; - perm::Union{Nothing,AbstractVector{<:Integer}}=nothing, - postorder::Bool=isnothing(perm)||isempty(perm), userperm_only::Bool=true) where Ti + perm::Union{Nothing,AbstractVector{<:Integer}}=nothing, + postorder::Bool=isnothing(perm)||isempty(perm), + userperm_only::Bool=true, + nested_dissection::Bool=false) where Ti sA = unsafe_load(pointer(A)) sA.stype == 0 && throw(ArgumentError("sparse matrix is not symmetric/Hermitian")) - @cholmod_param postorder = postorder begin - if perm === nothing || isempty(perm) # TODO: deprecate empty perm - return analyze(A) - else # user permutation provided - if userperm_only # use perm even if it is worse than AMD - @cholmod_param nmethods = 1 begin + # The default is to just use AMD. Use nested dissection only if explicitly asked for. + # https://github.com/JuliaSparse/SparseArrays.jl/issues/548 + # https://github.com/DrTimothyAldenDavis/SuiteSparse/blob/26ababc7f3af725c5fb9168a1b94850eab74b666/CHOLMOD/Include/cholmod.h#L555-L574 + @cholmod_param nmethods = (nested_dissection ? 0 : 2) begin + @cholmod_param postorder = postorder begin + if perm === nothing || isempty(perm) # TODO: deprecate empty perm + return analyze(A) + else # user permutation provided + if userperm_only # use perm even if it is worse than AMD + @cholmod_param nmethods = 1 begin + return analyze_p(A, Ti[p-1 for p in perm]) + end + else return analyze_p(A, Ti[p-1 for p in perm]) end - else - return analyze_p(A, Ti[p-1 for p in perm]) end end end + end function cholesky!(F::Factor{Tv}, A::Sparse{Tv}; @@ -1467,7 +1475,7 @@ See also [`cholesky`](@ref). !!! note This method uses the CHOLMOD library from SuiteSparse, which only supports - real or complex types in single or double precision. + real or complex types in single or double precision. Input matrices not of those element types will be converted to these types as appropriate. """ @@ -1587,8 +1595,8 @@ true !!! note This method uses the CHOLMOD[^ACM887][^DavisHager2009] library from [SuiteSparse](https://github.com/DrTimothyAldenDavis/SuiteSparse). - CHOLMOD only supports real or complex types in single or double precision. - Input matrices not of those element types will be + CHOLMOD only supports real or complex types in single or double precision. + Input matrices not of those element types will be converted to these types as appropriate. 
Many other functions from CHOLMOD are wrapped but not exported from the @@ -1633,8 +1641,8 @@ have the type tag, it must still be symmetric or Hermitian. See also [`ldlt`](@ref). !!! note - This method uses the CHOLMOD library from [SuiteSparse](https://github.com/DrTimothyAldenDavis/SuiteSparse), - which only supports real or complex types in single or double precision. + This method uses the CHOLMOD library from [SuiteSparse](https://github.com/DrTimothyAldenDavis/SuiteSparse), + which only supports real or complex types in single or double precision. Input matrices not of those element types will be converted to these types as appropriate. """ @@ -1695,7 +1703,7 @@ it should be a permutation of `1:size(A,1)` giving the ordering to use !!! note This method uses the CHOLMOD[^ACM887][^DavisHager2009] library from [SuiteSparse](https://github.com/DrTimothyAldenDavis/SuiteSparse). - CHOLMOD only supports real or complex types in single or double precision. + CHOLMOD only supports real or complex types in single or double precision. Input matrices not of those element types will be converted to these types as appropriate. @@ -1767,7 +1775,7 @@ See also [`lowrankupdate!`](@ref), [`lowrankdowndate`](@ref), [`lowrankdowndate! """ lowrankupdate(F::Factor{Tv}, V::AbstractArray{Tv2}) where {Tv, Tv2} = lowrankupdate!( - change_xdtype(F, promote_type(Tv, Tv2)), + change_xdtype(F, promote_type(Tv, Tv2)), convert(AbstractArray{promote_type(Tv, Tv2)}, V) ) @@ -1782,7 +1790,7 @@ See also [`lowrankdowndate!`](@ref), [`lowrankupdate`](@ref), [`lowrankupdate!`] """ lowrankdowndate(F::Factor{Tv}, V::AbstractArray{Tv2}) where {Tv, Tv2} = lowrankdowndate!( - change_xdtype(F, promote_type(Tv, Tv2)), + change_xdtype(F, promote_type(Tv, Tv2)), convert(AbstractArray{promote_type(Tv, Tv2)}, V) ) From 7bc65aba8cde80ba8de8c4c443625cb47199bea4 Mon Sep 17 00:00:00 2001 From: "Viral B. 
Shah" Date: Sat, 2 Nov 2024 12:59:12 -0400 Subject: [PATCH 8/9] Update CI --- .github/workflows/ci.yml | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 80b3269b..bd4c83fe 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,30 +26,23 @@ jobs: - '1.11' os: - ubuntu-latest - - macOS-latest - windows-latest arch: - x64 - - x86 - exclude: + include: - os: macOS-latest + arch: aarch64 + version: '1.11' + - os: ubuntu-latest arch: x86 + version: '1.11' steps: - uses: actions/checkout@v4 - - uses: julia-actions/setup-julia@v1 + - uses: julia-actions/setup-julia@latest with: version: ${{ matrix.version }} arch: ${{ matrix.arch }} - - uses: actions/cache@v4 - env: - cache-name: cache-artifacts - with: - path: ~/.julia/artifacts - key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} - restore-keys: | - ${{ runner.os }}-test-${{ env.cache-name }}- - ${{ runner.os }}-test-${{ matrix.os }} - ${{ runner.os }}- + - uses: julia-actions/cache@v2 - run: julia --color=yes .ci/test_and_change_uuid.jl - uses: julia-actions/julia-buildpkg@v1 - uses: julia-actions/julia-runtest@v1 @@ -67,7 +60,6 @@ jobs: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@latest with: - # version: '1.6' version: '1.11' - name: Generate docs run: | From 298f5e11d4b6bb7acd803780c5531e6c8eaa30e9 Mon Sep 17 00:00:00 2001 From: William Moses Date: Mon, 11 Nov 2024 16:47:30 -0600 Subject: [PATCH 9/9] Break recursion (#579) --- src/sparsevector.jl | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/sparsevector.jl b/src/sparsevector.jl index 41498b50..b80fbe6c 100644 --- a/src/sparsevector.jl +++ b/src/sparsevector.jl @@ -1277,13 +1277,16 @@ function vcat(X1::_SparseConcatGroup, X::_SparseConcatGroup...) end return Base.typed_vcat(Base.promote_eltype(X1, X...), X1, X...) end -function hvcat(rows::Tuple{Vararg{Int}}, X1::_SparseConcatGroup, X::_SparseConcatGroup...) +function hvcat_internal(rows::Tuple{Vararg{Int}}, X1::_SparseConcatGroup, X::_SparseConcatGroup...) if anysparse(X1) || anysparse(X...) vcat(_hvcat_rows(rows, X1, X...)...) else Base.typed_hvcat(Base.promote_eltypeof(X1, X...), rows, X1, X...) end end +function hvcat(rows::Tuple{Vararg{Int}}, X1::_SparseConcatGroup, X::_SparseConcatGroup...) + return hvcat_internal(rows, X1, X...) +end function _hvcat_rows((row1, rows...)::Tuple{Vararg{Int}}, X::_SparseConcatGroup...) if row1 ≤ 0 throw(ArgumentError("length of block row must be positive, got $row1")) @@ -1304,9 +1307,8 @@ hcat(n1::Number, ns::Vararg{Number}) = invoke(hcat, Tuple{Vararg{Number}}, n1, n vcat(n1::Number, ns::Vararg{Number}) = invoke(vcat, Tuple{Vararg{Number}}, n1, ns...) hcat(n1::N, ns::Vararg{N}) where {N<:Number} = invoke(hcat, Tuple{Vararg{N}}, n1, ns...) vcat(n1::N, ns::Vararg{N}) where {N<:Number} = invoke(vcat, Tuple{Vararg{N}}, n1, ns...) -hvcat(rows::Tuple{Vararg{Int}}, n1::Number, ns::Vararg{Number}) = invoke(hvcat, Tuple{typeof(rows), Vararg{Number}}, rows, n1, ns...) -hvcat(rows::Tuple{Vararg{Int}}, n1::N, ns::Vararg{N}) where {N<:Number} = invoke(hvcat, Tuple{typeof(rows), Vararg{N}}, rows, n1, ns...) - +hvcat(rows::Tuple{Vararg{Int}}, n1::Number, ns::Vararg{Number}) = hvcat_internal(rows, n1, ns...) +hvcat(rows::Tuple{Vararg{Int}}, n1::N, ns::Vararg{N}) where {N<:Number} = hvcat_internal(rows, n1, ns...) 
# make sure UniformScaling objects are converted to sparse matrices for concatenation promote_to_array_type(A::Tuple{Vararg{Union{_SparseConcatGroup,UniformScaling}}}) = anysparse(A...) ? SparseMatrixCSC : Matrix
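A minimal usage sketch of the tuple-size `undef` constructors introduced in patch 3/9, assuming the series above is applied; the element type `Float64`, index type `Int32`, and the `(3, 4)` size are illustrative choices only, not part of the patches:

```julia
using SparseArrays

# Size given as a tuple, matching the Base.Array convention (patch 3/9
# forwards these to spzeros with the requested element/index types).
A = SparseMatrixCSC{Float64, Int32}(undef, (3, 4))   # 3×4 matrix with no stored entries
v = SparseVector{Float64, Int32}(undef, (4,))        # length-4 vector with no stored entries

size(A) == (3, 4) && nnz(A) == 0   # true
size(v) == (4,)   && nnz(v) == 0   # true
```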