diff --git a/scipy/sparse/_csparsetools.pyx b/scipy/sparse/_csparsetools.pyx
index f5d001031b10..92dd96e52ba5 100644
--- a/scipy/sparse/_csparsetools.pyx
+++ b/scipy/sparse/_csparsetools.pyx
@@ -11,8 +11,8 @@ import numpy as np
 
 
 ctypedef fused idx_t:
-    cnp.int32_t
-    cnp.int64_t
+    cnp.npy_int32
+    cnp.npy_int64
 
 
 ctypedef fused value_t:
@@ -27,8 +27,39 @@ ctypedef fused value_t:
     cnp.npy_uint64
     cnp.npy_float32
     cnp.npy_float64
+    long double
     float complex
     double complex
+    long double complex
+
+
+# Use .char to work around dtype comparison bugs in earlier Numpy
+# versions
+
+DTYPE_NAME_MAP = {
+    np.dtype(np.bool_).char: "npy_bool",
+    np.dtype(np.int8).char: "npy_int8",
+    np.dtype(np.uint8).char: "npy_uint8",
+    np.dtype(np.int16).char: "npy_int16",
+    np.dtype(np.uint16).char: "npy_uint16",
+    np.dtype(np.int32).char: "npy_int32",
+    np.dtype(np.uint32).char: "npy_uint32",
+    np.dtype(np.int64).char: "npy_int64",
+    np.dtype(np.uint64).char: "npy_uint64",
+    np.dtype(np.float32).char: "npy_float32",
+    np.dtype(np.float64).char: "npy_float64",
+    np.dtype(np.longdouble).char: "long double",
+    np.dtype(np.complex64).char: "float complex",
+    np.dtype(np.complex128).char: "double complex",
+    np.dtype(np.clongdouble).char: "long double complex"
+}
+
+if np.dtype('q').itemsize == 4:
+    DTYPE_NAME_MAP['q'] = "npy_int32"
+    DTYPE_NAME_MAP['Q'] = "npy_uint32"
+elif np.dtype('q').itemsize == 8:
+    DTYPE_NAME_MAP['q'] = "npy_int64"
+    DTYPE_NAME_MAP['Q'] = "npy_uint64"
 
 
 def prepare_index_for_memoryview(cnp.ndarray i, cnp.ndarray j, cnp.ndarray x=None):
@@ -109,8 +140,22 @@ cpdef lil_get1(cnp.npy_intp M, cnp.npy_intp N, object[:] rows, object[:] datas,
     return 0
 
 
-cpdef lil_insert(cnp.npy_intp M, cnp.npy_intp N, object[:] rows, object[:] datas,
-                 cnp.npy_intp i, cnp.npy_intp j, value_t x):
+def lil_insert(cnp.npy_intp M, cnp.npy_intp N, object[:] rows, object[:] datas,
+               cnp.npy_intp i, cnp.npy_intp j, object x, object dtype):
+    """
+    Work around broken Cython fused type dispatch
+    """
+    dtype = np.dtype(dtype)
+    try:
+        key = DTYPE_NAME_MAP[dtype.char]
+    except KeyError:
+        raise ValueError("Unsupported data type: %r" % (dtype.char,))
+
+    _lil_insert[key](M, N, rows, datas, i, j, x)
+
+
+cpdef _lil_insert(cnp.npy_intp M, cnp.npy_intp N, object[:] rows, object[:] datas,
+                  cnp.npy_intp i, cnp.npy_intp j, value_t x):
     """
     Insert a single item to LIL matrix.
 
@@ -194,12 +239,42 @@ def lil_fancy_get(cnp.npy_intp M, cnp.npy_intp N,
         new_rows[x] = new_row
         new_datas[x] = new_data
 
+
 def lil_fancy_set(cnp.npy_intp M, cnp.npy_intp N,
                   object[:] rows, object[:] data,
-                  idx_t[:,:] i_idx,
-                  idx_t[:,:] j_idx,
-                  value_t[:,:] values):
+                  object i_idx,
+                  object j_idx,
+                  object values):
+    """
+    Work around broken Cython fused type dispatch
+    """
+    try:
+        key = DTYPE_NAME_MAP[values.dtype.char]
+    except KeyError:
+        raise ValueError("Unsupported data type: %r" % (values.dtype.char,))
+    try:
+        ikey = DTYPE_NAME_MAP[i_idx.dtype.char]
+    except KeyError:
+        raise ValueError("Unsupported data type: %r" % (i_idx.dtype.char,))
+
+    if key != "npy_bool":
+        _lil_fancy_set[ikey, key](M, N, rows, data, i_idx, j_idx, values)
+    else:
+        # bool has no memoryview support
+        for x in range(i_idx.shape[0]):
+            for y in range(i_idx.shape[1]):
+                i = i_idx[x,y]
+                j = j_idx[x,y]
+                _lil_insert[key](M, N, rows, data, i, j, values[x, y])
+
+
+cpdef _lil_fancy_set(cnp.npy_intp M, cnp.npy_intp N,
+                     object[:] rows,
+                     object[:] data,
+                     idx_t[:,:] i_idx,
+                     idx_t[:,:] j_idx,
+                     value_t[:,:] values):
     """
     Set multiple items to a LIL matrix.
@@ -222,7 +297,7 @@ def lil_fancy_set(cnp.npy_intp M, cnp.npy_intp N,
         for y in range(i_idx.shape[1]):
             i = i_idx[x,y]
             j = j_idx[x,y]
-            lil_insert[value_t](M, N, rows, data, i, j, values[x, y])
+            _lil_insert[value_t](M, N, rows, data, i, j, values[x, y])
 
 
 cdef lil_insertat_nocheck(list row, list data, cnp.npy_intp j, object x):
diff --git a/scipy/sparse/lil.py b/scipy/sparse/lil.py
index b332c4103f99..103f116e2c8f 100644
--- a/scipy/sparse/lil.py
+++ b/scipy/sparse/lil.py
@@ -283,7 +283,7 @@ def __setitem__(self, index, x):
                     raise ValueError("Trying to assign a sequence to an item")
                 _csparsetools.lil_insert(self.shape[0], self.shape[1],
                                          self.rows, self.data,
-                                         i, j, x)
+                                         i, j, x, self.dtype)
                 return
 
         # General indexing
@@ -326,16 +326,16 @@ def _mul_scalar(self, other):
             new = self.copy()
            new = new.astype(res_dtype)
             # Multiply this scalar by every element.
-            new.data[:] = [[val*other for val in rowvals] for
-                           rowvals in new.data]
+            for j, rowvals in enumerate(new.data):
+                new.data[j] = [val*other for val in rowvals]
             return new
 
     def __truediv__(self, other):           # self / other
         if isscalarlike(other):
             new = self.copy()
             # Divide every element by this scalar
-            new.data[:] = [[val/other for val in rowvals] for
-                           rowvals in new.data]
+            for j, rowvals in enumerate(new.data):
+                new.data[j] = [val/other for val in rowvals]
             return new
         else:
             return self.tocsr() / other
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py
index 1532291654fa..23621b3a7648 100644
--- a/scipy/sparse/tests/test_base.py
+++ b/scipy/sparse/tests/test_base.py
@@ -1800,44 +1800,56 @@ def check(dtype):
 
 class _TestGetSet:
     def test_getelement(self):
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
-            D = array([[1,0,0],
-                       [4,3,0],
-                       [0,2,0],
-                       [0,0,0]])
-            A = self.spmatrix(D)
+        def check(dtype):
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
+                D = array([[1,0,0],
+                           [4,3,0],
+                           [0,2,0],
+                           [0,0,0]], dtype=dtype)
+                A = self.spmatrix(D)
+
+            M,N = D.shape
 
-            M,N = D.shape
+            for i in range(-M, M):
+                for j in range(-N, N):
+                    assert_equal(A[i,j], D[i,j])
 
-            for i in range(-M, M):
-                for j in range(-N, N):
-                    assert_equal(A[i,j], D[i,j])
+            for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
+                assert_raises((IndexError, TypeError), A.__getitem__, ij)
 
-            for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
-                assert_raises((IndexError, TypeError), A.__getitem__, ij)
+        for dtype in supported_dtypes:
+            yield check, np.dtype(dtype)
 
     def test_setelement(self):
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
-            A = self.spmatrix((3,4))
-            A[0, 0] = 0  # bug 870
-            A[1, 2] = 4.0
-            A[0, 1] = 3
-            A[2, 0] = 2.0
-            A[0,-1] = 8
-            A[-1,-2] = 7
-            A[0, 1] = 5
-            assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
+        def check(dtype):
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
+                A = self.spmatrix((3,4), dtype=dtype)
+                A[0, 0] = dtype.type(0)  # bug 870
+                A[1, 2] = dtype.type(4.0)
+                A[0, 1] = dtype.type(3)
+                A[2, 0] = dtype.type(2.0)
+                A[0,-1] = dtype.type(8)
+                A[-1,-2] = dtype.type(7)
+                A[0, 1] = dtype.type(5)
+
+            if dtype != np.bool_:
+                assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
 
-            for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
-                assert_raises(IndexError, A.__setitem__, ij, 123.0)
+            for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
+                assert_raises(IndexError, A.__setitem__, ij, 123.0)
 
-            for v in [[1,2,3], array([1,2,3])]:
-                assert_raises(ValueError, A.__setitem__, (0,0), v)
+            for v in [[1,2,3], array([1,2,3])]:
+                assert_raises(ValueError, A.__setitem__, (0,0), v)
 
-            for v in [3j]:
-                assert_raises(TypeError, A.__setitem__, (0,0), v)
+            if (not np.issubdtype(dtype, np.complexfloating) and
+                    dtype != np.bool_):
+                for v in [3j]:
+                    assert_raises(TypeError, A.__setitem__, (0,0), v)
+
+        for dtype in supported_dtypes:
+            yield check, np.dtype(dtype)
 
     def test_scalar_assign_2(self):
         n, m = (5, 10)
@@ -2414,6 +2426,19 @@ def _test_set_slice(i, j):
         for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]:
             _test_set_slice(i, j)
 
+    def test_fancy_assignment_dtypes(self):
+        def check(dtype):
+            A = self.spmatrix((5, 5), dtype=dtype)
+            A[[0,1],[0,1]] = dtype.type(1)
+            assert_equal(A.sum(), dtype.type(1)*2)
+            A[0:2,0:2] = dtype.type(1.0)
+            assert_equal(A.sum(), dtype.type(1)*4)
+            A[2,2] = dtype.type(1.0)
+            assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1))
+
+        for dtype in supported_dtypes:
+            yield check, np.dtype(dtype)
+
     def test_sequence_assignment(self):
         with warnings.catch_warnings():
             warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
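
Note: a minimal usage sketch of the behaviour this patch is meant to enable, with the patch applied. It assumes a platform where np.longdouble is a distinct extended-precision type; the exact dtypes registered in DTYPE_NAME_MAP depend on the NumPy build.

    import numpy as np
    from scipy.sparse import lil_matrix

    # Scalar assignment now routes through lil_insert(..., dtype), which maps
    # dtype.char via DTYPE_NAME_MAP to an explicit _lil_insert specialization,
    # so non-default dtypes such as longdouble are accepted.
    A = lil_matrix((3, 3), dtype=np.longdouble)
    A[0, 0] = np.longdouble(2.5)
    # Fancy assignment goes through the lil_fancy_set wrapper, which picks
    # the _lil_fancy_set[ikey, key] specialization from the index and value dtypes.
    A[[0, 1], [0, 1]] = np.longdouble(1.0)
    print(A.todense())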