Add sparse!(I, J, V) and spzeros!(I, J) (#397)
This patch adds convenience wrappers around the "full" calls to
`sparse!` and `spzeros!` that allocate the intermediate arrays of
appropriate size but reuse the input vectors for the final matrix
storage.

These methods are useful in the quite common case where the COO
representation is built for the sole purpose of constructing the
sparse matrix and is then thrown away.
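A rough sketch of that pattern (made-up COO data; `sparse!` is not exported, so it is qualified with the module name):

```julia
using SparseArrays

# COO data that exists only to build the matrix.
I = [1, 2, 3, 2]              # row indices
J = [1, 2, 3, 3]              # column indices
V = [10.0, 20.0, 30.0, 40.0]  # values

# sparse(I, J, V) would allocate fresh output buffers; the new
# sparse!(I, J, V) reuses I, J, and V as the storage of the result.
S = SparseArrays.sparse!(I, J, V)
```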
fredrikekre authored Jun 15, 2023
1 parent 2c8b8b1 commit 6ccc974
Showing 3 changed files with 101 additions and 0 deletions.
2 changes: 2 additions & 0 deletions docs/src/index.md
@@ -211,12 +211,14 @@ SparseArrays.AbstractSparseMatrix
SparseArrays.SparseVector
SparseArrays.SparseMatrixCSC
SparseArrays.sparse
SparseArrays.sparse!
SparseArrays.sparsevec
Base.similar(::SparseArrays.AbstractSparseMatrixCSC, ::Type)
SparseArrays.issparse
SparseArrays.nnz
SparseArrays.findnz
SparseArrays.spzeros
SparseArrays.spzeros!
SparseArrays.spdiagm
SparseArrays.sparse_hcat
SparseArrays.sparse_vcat
53 changes: 53 additions & 0 deletions src/sparsematrix.jl
@@ -1270,6 +1270,34 @@ function sparse!(I::AbstractVector{Ti}, J::AbstractVector{Ti},
Vector{Ti}(undef, n+1), Vector{Ti}(), Vector{Tv}())
end

"""
SparseArrays.sparse!(I, J, V, [m, n, combine]) -> SparseMatrixCSC
Variant of `sparse!` that re-uses the input vectors (`I`, `J`, `V`) for the final matrix
storage. After construction the input vectors will alias the matrix buffers; `S.colptr ===
I`, `S.rowval === J`, and `S.nzval === V` holds, and they will be `resize!`d as necessary.
Note that some work buffers will still be allocated. Specifically, this method is a
convenience wrapper around `sparse!(I, J, V, m, n, combine, klasttouch, csrrowptr,
csrcolval, csrnzval, csccolptr, cscrowval, cscnzval)` where this method allocates
`klasttouch`, `csrrowptr`, `csrcolval`, and `csrnzval` of appropriate size, but reuses `I`,
`J`, and `V` for `csccolptr`, `cscrowval`, and `cscnzval`.
Arguments `m`, `n`, and `combine` defaults to `maximum(I)`, `maximum(J)`, and `+`,
respectively.
!!! compat "Julia 1.10"
This method requires Julia version 1.10 or later.
"""
function sparse!(I::AbstractVector{Ti}, J::AbstractVector{Ti}, V::AbstractVector{Tv},
m::Integer=dimlub(I), n::Integer=dimlub(J), combine::Function=+) where {Tv, Ti<:Integer}
klasttouch = Vector{Ti}(undef, n)
csrrowptr = Vector{Ti}(undef, m + 1)
csrcolval = Vector{Ti}(undef, length(I))
csrnzval = Vector{Tv}(undef, length(I))
sparse!(I, J, V, Int(m), Int(n), combine, klasttouch, csrrowptr, csrcolval, csrnzval, I, J, V)
end
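A hedged usage sketch of the wrapper defined above, with illustrative indices and values; `getcolptr` is the internal accessor that the tests further down also use, while `rowvals` and `nonzeros` are public:

```julia
using SparseArrays

I = [1, 1, 2, 3]
J = [1, 3, 2, 3]
V = [1.0, 2.0, 3.0, 4.0]

# Explicit dimensions and combine function; the wrapper allocates the CSR work
# buffers itself and passes I, J, V back in as the CSC buffers.
S = SparseArrays.sparse!(I, J, V, 3, 3, +)

# The aliasing documented above: the inputs now back the matrix storage.
@assert SparseArrays.getcolptr(S) === I
@assert rowvals(S) === J
@assert nonzeros(S) === V
```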

dimlub(I) = isempty(I) ? 0 : Int(maximum(I)) #least upper bound on required sparse matrix dimension

sparse(I,J,v::Number) = sparse(I, J, fill(v,length(I)))
@@ -2116,6 +2144,31 @@ function spzeros!(::Type{Tv}, I::AbstractVector{Ti}, J::AbstractVector{Ti}, m::I
csrrowptr, csrcolval, csrnzval, csccolptr, cscrowval, cscnzval)
end

"""
SparseArrays.spzeros!(::Type{Tv}, I, J, [m, n]) -> SparseMatrixCSC{Tv}
Variant of `spzeros!` that re-uses the input vectors `I` and `J` for the final matrix
storage. After construction the input vectors will alias the matrix buffers; `S.colptr ===
I` and `S.rowval === J` holds, and they will be `resize!`d as necessary.
Note that some work buffers will still be allocated. Specifically, this method is a
convenience wrapper around `spzeros!(Tv, I, J, m, n, klasttouch, csrrowptr, csrcolval,
csccolptr, cscrowval)` where this method allocates `klasttouch`, `csrrowptr`, and
`csrcolval` of appropriate size, but reuses `I` and `J` for `csccolptr` and `cscrowval`.
Arguments `m` and `n` defaults to `maximum(I)` and `maximum(J)`.
!!! compat "Julia 1.10"
This method requires Julia version 1.10 or later.
"""
function spzeros!(::Type{Tv}, I::AbstractVector{Ti}, J::AbstractVector{Ti},
m::Integer=dimlub(I), n::Integer=dimlub(J)) where {Tv, Ti <: Integer}
klasttouch = Vector{Ti}(undef, n)
csrrowptr = Vector{Ti}(undef, m + 1)
csrcolval = Vector{Ti}(undef, length(I))
return spzeros!(Tv, I, J, Int(m), Int(n), klasttouch, csrrowptr, csrcolval, I, J)
end
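And a corresponding sketch for `spzeros!`, again with made-up positions; the element type and dimensions are passed explicitly here:

```julia
using SparseArrays

I = [1, 2, 2, 4]
J = [1, 1, 3, 4]

# Stored (structural) zeros at the given positions; I and J are recycled as the
# colptr/rowval buffers of the result, as described in the docstring above.
S = SparseArrays.spzeros!(Float32, I, J, 4, 4)

@assert eltype(S) == Float32
@assert size(S) == (4, 4)
@assert nnz(S) == 4   # four stored entries, all equal to zero
```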

import Base._one
function Base._one(unit::T, S::AbstractSparseMatrixCSC) where T
size(S, 1) == size(S, 2) || throw(DimensionMismatch("multiplicative identity only defined for square matrices"))
46 changes: 46 additions & 0 deletions test/sparsematrix_constructors_indexing.jl
@@ -1751,6 +1751,52 @@ end
@test getcolptr(S!) === I
@test getrowval(S!) === J
@test nonzeros(S!) === V

# Test reuse of I, J, V for the matrix buffers in
# sparse!(I, J, V), sparse!(I, J, V, m, n), sparse!(I, J, V, m, n, combine),
# spzeros!(T, I, J), and spzeros!(T, I, J, m, n).
I, J, V = allocate_arrays(m, n)
S = sparse(I, J, V)
S! = sparse!(I, J, V)
@test S == S!
@test same_structure(S, S!)
@test getcolptr(S!) === I
@test getrowval(S!) === J
@test nonzeros(S!) === V
I, J, V = allocate_arrays(m, n)
S = sparse(I, J, V, 2m, 2n)
S! = sparse!(I, J, V, 2m, 2n)
@test S == S!
@test same_structure(S, S!)
@test getcolptr(S!) === I
@test getrowval(S!) === J
@test nonzeros(S!) === V
I, J, V = allocate_arrays(m, n)
S = sparse(I, J, V, 2m, 2n, *)
S! = sparse!(I, J, V, 2m, 2n, *)
@test S == S!
@test same_structure(S, S!)
@test getcolptr(S!) === I
@test getrowval(S!) === J
@test nonzeros(S!) === V
for T in (Float32, Float64)
I, J, = allocate_arrays(m, n)
S = spzeros(T, I, J)
S! = spzeros!(T, I, J)
@test S == S!
@test same_structure(S, S!)
@test eltype(S) == eltype(S!) == T
@test getcolptr(S!) === I
@test getrowval(S!) === J
I, J, = allocate_arrays(m, n)
S = spzeros(T, I, J, 2m, 2n)
S! = spzeros!(T, I, J, 2m, 2n)
@test S == S!
@test same_structure(S, S!)
@test eltype(S) == eltype(S!) == T
@test getcolptr(S!) === I
@test getrowval(S!) === J
end
end
end
