In Python, for my application it is usually best to create a sparse matrix by creating a sparse COO matrix with rows, columns and values arrays and then changing it to CSC (CSR) format.
Now, say I want to condense the CSC matrix. What is an efficient way to do so? The condensation rows/columns vary during the code and are much smaller than the dimensions of the sparse matrix, so I do not believe rebuilding the COO matrix is efficient.
The following MWE shows an example for creating the condensed matrix, but without any optimization attempts. There is a sparse efficiency warning because the number of nonzeros is increased. In this MWE I use dia_array
to create the sparse matrix for simplicity.
import numpy as np
from scipy.sparse import dia_array
def main():
    """Build a small banded test matrix and demonstrate condensation.

    Constructs a symmetric banded matrix in CSC format, then folds a set
    of rows/columns into the first condensed index via in-place sums and
    drops the remaining condensed rows/columns.
    """
    n = 10  # matrix dimension
    m = 6   # half bandwidth
    # symmetric band profile 1..m..1, replicated across all n diagonals
    band_profile = np.concatenate((np.arange(1, m + 1),
                                   np.arange(m - 1, 0, -1)))
    data = np.tile(band_profile[:, np.newaxis], (1, n))
    offsets = np.arange(-m + 1, m)
    A = dia_array((data, offsets), shape=(n, n)).tocsc()
    print("Matrix A:")
    print(repr(A.toarray()))
    # condense these rows/columns
    cond_rowscols = np.arange(n-8, n, 2)
    print("Condensed rows/columns of A:")
    print(repr(cond_rowscols))
    # IMPROVE HERE
    # condensation algorithm: fold the trailing condensed rows/cols into
    # the first condensed index, then drop them
    B = A.copy()
    B[[cond_rowscols[0]]] += B[cond_rowscols[1:]].sum(axis=0)
    B[:, [cond_rowscols[0]]] += B[:, cond_rowscols[1:]].sum(axis=1)[:, np.newaxis]
    keep_mask = np.ones(n, dtype=bool)
    keep_mask[cond_rowscols[1:]] = False
    B = B[np.ix_(keep_mask, keep_mask)]
    print("Condensed matrix A:")
    print(repr(B.toarray()))
    print('Done')


if __name__ == "__main__":
    main()
The output is:
Matrix A:
array([[6, 5, 4, 3, 2, 1, 0, 0, 0, 0],
[5, 6, 5, 4, 3, 2, 1, 0, 0, 0],
[4, 5, 6, 5, 4, 3, 2, 1, 0, 0],
[3, 4, 5, 6, 5, 4, 3, 2, 1, 0],
[2, 3, 4, 5, 6, 5, 4, 3, 2, 1],
[1, 2, 3, 4, 5, 6, 5, 4, 3, 2],
[0, 1, 2, 3, 4, 5, 6, 5, 4, 3],
[0, 0, 1, 2, 3, 4, 5, 6, 5, 4],
[0, 0, 0, 1, 2, 3, 4, 5, 6, 5],
[0, 0, 0, 0, 1, 2, 3, 4, 5, 6]])
Condensed rows/columns of A:
array([2, 4, 6, 8])
Condensed matrix A:
array([[ 6, 5, 6, 3, 1, 0, 0],
[ 5, 6, 9, 4, 2, 0, 0],
[ 6, 9, 56, 14, 16, 14, 9],
[ 3, 4, 14, 6, 4, 2, 0],
[ 1, 2, 16, 4, 6, 4, 2],
[ 0, 0, 14, 2, 4, 6, 4],
[ 0, 0, 9, 0, 2, 4, 6]])
Done
Edit: As per hpaulj's comment, we can create a condensation matrix T:
# condensation with matrix multiplication
# Build a tall selection/accumulation matrix T with exactly one 1 per row,
# so that B = T.T @ A @ T sums the condensed rows/columns into one index.
n_conds = cond_rowscols.shape[0]  # number of condensed rows/cols
t_vals = np.ones(n, dtype=int)  # each row of T carries a single 1
t_rows = np.arange(n)
t_cols = np.empty_like(t_rows)
# free (kept) rows map onto consecutive columns of the condensed system
t_cols[free_rowscols] = np.arange(n-n_conds+1)
# every remaining condensed row maps onto the first condensed index's column
t_cols[cond_rowscols[1:]] = cond_rowscols[0]
# NOTE(review): csc_array must be in scope here
# (from scipy.sparse import csc_array) — the MWE above only imports dia_array
T = csc_array((t_vals, (t_rows, t_cols)), shape=(n, n-n_conds+1))
Such that the condensed matrix A is B = T.T @ A @ T.
T is:
>>> print(repr(T.toarray()))
array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1]], dtype=int32)
This does not yield any sparse efficiency errors! I still have to time it, though, for larger problems. Does scipy.sparse
use sparse BLAS for sparse matrix multiplications?
I have created a test case for both options shown in the MWE with a simple performance comparison, using a smaller set of values than I intend to use in practice.
It seems that the in-place sum option takes about 5 times more than the multiplication option.
Since solving linear systems is always much more expensive, I think I'll settle with the sparse matrix multiplication option, for now.
import numpy as np
from scipy.sparse import dia_array, csr_array
from time import perf_counter
def build_sp_mat(n=None, m=None):
    """Generates a square sparse CSC matrix of size `n` and half bandwidth `m`"""
    if n is None:
        n = 1_000  # matrix size
    if m is None:
        m = 50  # half bandwidth
    # symmetric band profile 1..m..1 replicated over every column
    profile = np.concatenate((np.arange(1, m + 1),
                              np.arange(m - 1, 0, -1)))
    data = np.tile(profile[:, np.newaxis], (1, n))
    offsets = np.arange(-m + 1, m)
    return dia_array((data, offsets), shape=(n, n)).tocsc()
def condensed_rowscols(n, n_conds=None):
    """Return the indices of the condensed rows/columns.

    Parameters
    ----------
    n : int
        Dimension of the (square) matrix.
    n_conds : int, optional
        Total number of condensed rows/columns (default 50).

    Returns
    -------
    cond_rowscols : ndarray of int
        Evenly spaced indices of the rows/columns to be condensed.
    free_rowscols : ndarray of bool
        Mask of rows/columns kept after condensation; the first condensed
        index stays True because it receives the accumulated entries.
    """
    # NOTE: original docstring opened with a stray fourth quote (""""),
    # which put a literal '"' at the start of the docstring — fixed here.
    if n_conds is None:
        n_conds = 50  # total number of condensed rows/columns
    # condense these rows/columns: one every n//n_conds entries
    cond_rowscols = np.arange(n//n_conds-1, n, n//n_conds)
    free_rowscols = np.ones(n, dtype=bool)
    free_rowscols[cond_rowscols[1:]] = False
    print(repr(cond_rowscols))
    return cond_rowscols, free_rowscols
def condense_in_place_sum(A, cond_rowscols, free_rowscols):
    """Performs condensation via a sum of the condensed rows and columns"""
    target = cond_rowscols[0]   # index that accumulates the condensed entries
    extras = cond_rowscols[1:]  # indices folded into `target`, then dropped
    # fold rows first, then columns (the column sums see the updated rows)
    row_accum = A[extras].sum(axis=0)
    A[[target]] += row_accum
    col_accum = A[:, extras].sum(axis=1)
    A[:, [target]] += col_accum[:, np.newaxis]
    Acond = A[np.ix_(free_rowscols, free_rowscols)]  # indices are sorted
    return Acond
def condense_mat_mult(A, cond_rowscols, free_rowscols, n, n_conds):
    """Performs condensation via sparse matrix multiplications"""
    n_kept = n - n_conds + 1  # size of the condensed system
    # T has one 1 per row: free rows map to consecutive condensed columns,
    # the trailing condensed rows all map onto the first condensed index
    rows = np.arange(n)
    cols = np.empty_like(rows)
    cols[free_rowscols] = np.arange(n_kept)
    cols[cond_rowscols[1:]] = cond_rowscols[0]
    ones = np.ones(n, dtype=int)
    T = csr_array((ones, (rows, cols)), shape=(n, n_kept))
    Acond = T.T @ A @ T
    Acond.sort_indices()  # indices have to be sorted
    return Acond, T
if __name__ == "__main__":
n, m, n_conds = 500_000, 54, 1000
A = build_sp_mat(n, m)
cond_rowscols, free_rowscols = condensed_rowscols(n, n_conds)
A.sort_indices() # this is important
B = A.copy()
stime = perf_counter()
Acond1 = condense_in_place_sum(B, cond_rowscols, free_rowscols)
print(f"Condensation via in-place sum took {perf_counter()-stime} s")
# Acond1 comes with its indices array sorted automatically
stime = perf_counter()
Acond2, _ = condense_mat_mult(A, cond_rowscols, free_rowscols, n, n_conds)
print(f"Condensation via sparse multiplication took {perf_counter()-stime} s")
# Acond2's indices array has to be sorted, as shown in the function
print("Done")
The output is
Condensation via in-place sum took 9.0079096 s
Condensation via sparse multiplication took 1.4657909 s
Done