
ENH: sparse: update 'raise' statements

commit 1559800da52eb2f7286400aa1625ad08dc17f7b3 (parent: 724b672)
Authored by warren.weckesser
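
The change is purely syntactic: the Python 2-only statement form
"raise ExceptionClass, message" is replaced throughout by the call form
"raise ExceptionClass(message)", which is valid on both Python 2 and
Python 3 (the comma form is a SyntaxError under Python 3). Long messages
that previously relied on backslash continuation are rewritten with
implicit string concatenation inside the parentheses. A minimal sketch of
the pattern, not taken from the SciPy sources (check_shape and its message
are hypothetical):

    # Old style (Python 2 only); a SyntaxError under Python 3:
    #     raise ValueError, "shape must be a 2-tuple" \
    #                       " of positive integers"

    # New style (accepted by both Python 2 and Python 3):
    def check_shape(shape):
        # 'shape' is an illustrative argument used only for this sketch.
        if not (isinstance(shape, tuple) and len(shape) == 2):
            raise ValueError("shape must be a 2-tuple"
                             " of positive integers")
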
scipy/sparse/base.py (22 changed lines)
@@ -52,8 +52,8 @@ def __init__(self, maxprint=MAXPRINT):
self.format = self.__class__.__name__[:3]
self._shape = None
if self.format == 'spm':
- raise ValueError, "This class is not intended" \
- " to be instantiated directly."
+ raise ValueError("This class is not intended"
+ " to be instantiated directly.")
self.maxprint = maxprint
def set_shape(self,shape):
@@ -101,8 +101,8 @@ def asfptype(self):
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type)
- raise TypeError,'cannot upcast [%s] to a floating \
- point format' % self.dtype.name
+ raise TypeError('cannot upcast [%s] to a floating '
+ 'point format' % self.dtype.name)
def __iter__(self):
for r in xrange(self.shape[0]):
@@ -126,7 +126,7 @@ def getnnz(self):
try:
return self.nnz
except AttributeError:
- raise AttributeError, "nnz not defined"
+ raise AttributeError("nnz not defined")
def getformat(self):
try:
@@ -187,8 +187,8 @@ def __nonzero__(self): # Simple -- other ideas?
# non-zeros is more important. For now, raise an exception!
def __len__(self):
# return self.getnnz()
- raise TypeError, "sparse matrix length is ambiguous; use getnnz()" \
- " or shape[0]"
+ raise TypeError("sparse matrix length is ambiguous; use getnnz()"
+ " or shape[0]")
def asformat(self, format):
"""Return this matrix in a given sparse format
@@ -426,7 +426,7 @@ def __getattr__(self, attr):
elif attr == 'size':
return self.getnnz()
else:
- raise AttributeError, attr + " not found"
+ raise AttributeError(attr + " not found")
def transpose(self):
return self.tocsr().transpose()
@@ -541,7 +541,7 @@ def sum(self, axis=None):
# sum over rows and columns
return ( self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) ).sum()
else:
- raise ValueError, "axis out of bounds"
+ raise ValueError("axis out of bounds")
def mean(self, axis=None):
"""Average the matrix over the given axis. If the axis is None,
@@ -558,7 +558,7 @@ def mean(self, axis=None):
elif axis is None:
return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
else:
- raise ValueError, "axis out of bounds"
+ raise ValueError("axis out of bounds")
def diagonal(self):
"""Returns the main diagonal of the matrix
@@ -577,7 +577,7 @@ def setdiag(self, values, k=0):
"""
M, N = self.shape
if (k > 0 and k >= N) or (k < 0 and -k >= M):
- raise ValueError, "k exceedes matrix dimensions"
+ raise ValueError("k exceedes matrix dimensions")
if k < 0:
max_index = min(M+k, N, len(values))
for i,v in enumerate(values[:max_index]):
scipy/sparse/bsr.py (34 changed lines)
@@ -119,7 +119,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
R,C = blocksize
if (M % R) != 0 or (N % C) != 0:
- raise ValueError, 'shape must be multiple of blocksize'
+ raise ValueError('shape must be multiple of blocksize')
self.indptr = np.zeros(M//R + 1, dtype=np.intc )
@@ -200,25 +200,23 @@ def check_format(self, full_check=True):
# check array shapes
if np.rank(self.indices) != 1 or np.rank(self.indptr) != 1:
- raise ValueError,"indices, and indptr should be rank 1"
+ raise ValueError("indices, and indptr should be rank 1")
if np.rank(self.data) != 3:
- raise ValueError,"data should be rank 3"
+ raise ValueError("data should be rank 3")
# check index pointer
if (len(self.indptr) != M//R + 1 ):
- raise ValueError, \
- "index pointer size (%d) should be (%d)" % \
- (len(self.indptr), M//R + 1)
+ raise ValueError("index pointer size (%d) should be (%d)" %
+ (len(self.indptr), M//R + 1))
if (self.indptr[0] != 0):
- raise ValueError,"index pointer should start with 0"
+ raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
- raise ValueError,"indices and data should have the same size"
+ raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
- raise ValueError, \
- "Last value of index pointer should be less than "\
- "the size of index and data arrays"
+ raise ValueError("Last value of index pointer should be less than "
+ "the size of index and data arrays")
self.prune()
@@ -227,12 +225,12 @@ def check_format(self, full_check=True):
if self.nnz > 0:
if self.indices.max() >= N//C:
print "max index",self.indices.max()
- raise ValueError, "column index values must be < %d" % (N//C)
+ raise ValueError("column index values must be < %d" % (N//C))
if self.indices.min() < 0:
- raise ValueError, "column index values must be >= 0"
+ raise ValueError("column index values must be >= 0")
if diff(self.indptr).min() < 0:
- raise ValueError,'index pointer values must form a " \
- "non-decreasing sequence'
+ raise ValueError("index pointer values must form a "
+ "non-decreasing sequence")
#if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
@@ -475,14 +473,14 @@ def prune(self):
M,N = self.shape
if len(self.indptr) != M//R + 1:
- raise ValueError, "index pointer has invalid length"
+ raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
- raise ValueError, "indices array has too few elements"
+ raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
- raise ValueError, "data array has too few elements"
+ raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
scipy/sparse/compressed.py (72 changed lines)
@@ -57,16 +57,16 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False, dims=None, nzmax=No
self.indptr = np.array(indptr, copy=copy)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
- raise ValueError, "unrecognized %s_matrix constructor usage" %\
- self.format
+ raise ValueError("unrecognized %s_matrix constructor usage" %
+ self.format)
else:
#must be dense
try:
arg1 = np.asarray(arg1)
except:
- raise ValueError, "unrecognized %s_matrix constructor usage" % \
- self.format
+ raise ValueError("unrecognized %s_matrix constructor usage" %
+ self.format)
from coo import coo_matrix
self._set_self( self.__class__(coo_matrix(arg1, dtype=dtype)) )
@@ -80,7 +80,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False, dims=None, nzmax=No
major_dim = len(self.indptr) - 1
minor_dim = self.indices.max() + 1
except:
- raise ValueError,'unable to infer matrix dimensions'
+ raise ValueError('unable to infer matrix dimensions')
else:
self.shape = self._swap((major_dim,minor_dim))
@@ -139,19 +139,17 @@ def check_format(self, full_check=True):
# check index pointer
if (len(self.indptr) != major_dim + 1 ):
- raise ValueError, \
- "index pointer size (%d) should be (%d)" % \
- (len(self.indptr), major_dim + 1)
+ raise ValueError("index pointer size (%d) should be (%d)" %
+ (len(self.indptr), major_dim + 1))
if (self.indptr[0] != 0):
- raise ValueError,"index pointer should start with 0"
+ raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
- raise ValueError,"indices and data should have the same size"
+ raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
- raise ValueError, \
- "Last value of index pointer should be less than "\
- "the size of index and data arrays"
+ raise ValueError("Last value of index pointer should be less than "
+ "the size of index and data arrays")
self.prune()
@@ -159,14 +157,14 @@ def check_format(self, full_check=True):
#check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= minor_dim:
- raise ValueError, "%s index values must be < %d" % \
- (minor_name,minor_dim)
+ raise ValueError("%s index values must be < %d" %
+ (minor_name,minor_dim))
if self.indices.min() < 0:
- raise ValueError, "%s index values must be >= 0" % \
- minor_name
+ raise ValueError("%s index values must be >= 0" %
+ minor_name)
if np.diff(self.indptr).min() < 0:
- raise ValueError,'index pointer values must form a " \
- "non-decreasing sequence'
+ raise ValueError("index pointer values must form a "
+ "non-decreasing sequence")
#if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
@@ -179,11 +177,11 @@ def __add__(self,other):
# First check if argument is a scalar
if isscalarlike(other):
# Now we would add this scalar to every element.
- raise NotImplementedError, 'adding a scalar to a CSC or CSR ' \
- 'matrix is not supported'
+ raise NotImplementedError('adding a scalar to a CSC or CSR '
+ 'matrix is not supported')
elif isspmatrix(other):
if (other.shape != self.shape):
- raise ValueError, "inconsistent shapes"
+ raise ValueError("inconsistent shapes")
return self._binopt(other,'_plus_')
elif isdense(other):
@@ -199,11 +197,11 @@ def __sub__(self,other):
# First check if argument is a scalar
if isscalarlike(other):
# Now we would add this scalar to every element.
- raise NotImplementedError, 'adding a scalar to a sparse ' \
- 'matrix is not supported'
+ raise NotImplementedError('adding a scalar to a sparse '
+ 'matrix is not supported')
elif isspmatrix(other):
if (other.shape != self.shape):
- raise ValueError, "inconsistent shapes"
+ raise ValueError("inconsistent shapes")
return self._binopt(other,'_minus_')
elif isdense(other):
@@ -216,8 +214,8 @@ def __rsub__(self,other): # other - self
#note: this can't be replaced by other + (-self) for unsigned types
if isscalarlike(other):
# Now we would add this scalar to every element.
- raise NotImplementedError, 'adding a scalar to a sparse ' \
- 'matrix is not supported'
+ raise NotImplementedError('adding a scalar to a sparse '
+ 'matrix is not supported')
elif isdense(other):
# Convert this matrix to a dense matrix and subtract them
return other - self.todense()
@@ -330,7 +328,7 @@ def sum(self, axis=None):
return self.data.sum()
else:
return spmatrix.sum(self,axis)
- raise ValueError, "axis out of bounds"
+ raise ValueError("axis out of bounds")
#######################
# Getting and Setting #
@@ -361,7 +359,7 @@ def __getitem__(self, key):
elif isintlike(key):
return self[key, :]
else:
- raise IndexError, "invalid index"
+ raise IndexError("invalid index")
def _get_single_element(self,row,col):
@@ -395,9 +393,9 @@ def _get_slice(self, i, start, stop, stride, shape):
[start:stop:string, i] for column-oriented matrices
"""
if stride != 1:
- raise ValueError, "slicing with step != 1 not supported"
+ raise ValueError("slicing with step != 1 not supported")
if stop <= start:
- raise ValueError, "slice width must be >= 1"
+ raise ValueError("slice width must be >= 1")
#TODO make [i,:] faster
#TODO implement [i,x:y:z]
@@ -445,9 +443,8 @@ def _process_slice( sl, num ):
def _in_bounds( i0, i1, num ):
if not (0<=i0<num) or not (0<i1<=num) or not (i0<i1):
- raise IndexError,\
- "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %\
- (i0, num, i1, num, i0, i1)
+ raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
+ (i0, num, i1, num, i0, i1))
i0, i1 = _process_slice( slice0, shape0 )
j0, j1 = _process_slice( slice1, shape1 )
@@ -477,7 +474,7 @@ def __setitem__(self, key, val):
if (col < 0):
col += N
if not (0<=row<M) or not (0<=col<N):
- raise IndexError, "index out of bounds"
+ raise IndexError("index out of bounds")
major_index, minor_index = self._swap((row,col))
@@ -519,12 +516,13 @@ def __setitem__(self, key, val):
self.data[start:end][indxs[0]] = val
else:
#entry appears more than once
- raise ValueError,'nonzero entry (%d,%d) occurs more than once' % (row,col)
+ raise ValueError('nonzero entry (%d,%d) occurs more than once'
+ % (row,col))
self.check_format(full_check=True)
else:
# We should allow slices here!
- raise IndexError, "invalid index"
+ raise IndexError("invalid index")
######################
# Conversion methods #
scipy/sparse/csr.py (4 changed lines)
@@ -315,9 +315,9 @@ def _get_row_slice(self, i, cslice):
start, stop, stride = cslice.indices(self.shape[1])
if stride != 1:
- raise ValueError, "slicing with step != 1 not supported"
+ raise ValueError("slicing with step != 1 not supported")
if stop <= start:
- raise ValueError, "slice width must be >= 1"
+ raise ValueError("slice width must be >= 1")
#TODO make [i,:] faster
#TODO implement [i,x:y:z]
scipy/sparse/dok.py (50 changed lines)
@@ -174,8 +174,8 @@ def __getitem__(self, key):
###################################
# We should reshape the new matrix here!
###################################
- raise NotImplementedError, "fancy indexing supported over" \
- " one axis only"
+ raise NotImplementedError("fancy indexing supported over"
+ " one axis only")
return new
# Below here, j is a sequence, but i is an integer
@@ -186,14 +186,14 @@ def __getitem__(self, key):
seq = j
else:
# j is not an integer
- raise TypeError, "index must be a pair of integers or slices"
+ raise TypeError("index must be a pair of integers or slices")
# Create a new matrix of the correct dimensions
first = seq[0]
last = seq[-1]
if first < 0 or first >= self.shape[1] or last < 0 \
or last >= self.shape[1]:
- raise IndexError, "index out of bounds"
+ raise IndexError("index out of bounds")
newshape = (1, last-first+1)
new = dok_matrix(newshape)
# ** This uses linear time in the size n of dimension 1:
@@ -212,7 +212,7 @@ def __setitem__(self, key, value):
try:
i, j = key
except (ValueError, TypeError):
- raise TypeError, "index must be a pair of integers or slices"
+ raise TypeError("index must be a pair of integers or slices")
# First deal with the case where both i and j are integers
if isintlike(i) and isintlike(j):
@@ -222,7 +222,7 @@ def __setitem__(self, key, value):
j += self.shape[1]
if i < 0 or i >= self.shape[0] or j < 0 or j >= self.shape[1]:
- raise IndexError, "index out of bounds"
+ raise IndexError("index out of bounds")
if np.isscalar(value):
if value==0 and self.has_key((i,j)):
@@ -243,7 +243,7 @@ def __setitem__(self, key, value):
else:
# Make sure i is an integer. (But allow it to be a subclass of int).
if not isintlike(i):
- raise TypeError, "index must be a pair of integers or slices"
+ raise TypeError("index must be a pair of integers or slices")
seq = None
if seq is not None:
# First see if 'value' is another dok_matrix of the appropriate
@@ -253,8 +253,8 @@ def __setitem__(self, key, value):
for element in seq:
self[element, j] = value[element, 0]
else:
- raise NotImplementedError, "setting a 2-d slice of" \
- " a dok_matrix is not yet supported"
+ raise NotImplementedError("setting a 2-d slice of"
+ " a dok_matrix is not yet supported")
elif np.isscalar(value):
for element in seq:
self[element, j] = value
@@ -262,12 +262,12 @@ def __setitem__(self, key, value):
# See if value is a sequence
try:
if len(seq) != len(value):
- raise ValueError, "index and value ranges must" \
- " have the same length"
+ raise ValueError("index and value ranges must"
+ " have the same length")
except TypeError:
# Not a sequence
- raise TypeError, "unsupported type for" \
- " dok_matrix.__setitem__"
+ raise TypeError("unsupported type for"
+ " dok_matrix.__setitem__")
# Value is a sequence
for element, val in izip(seq, value):
@@ -283,7 +283,7 @@ def __setitem__(self, key, value):
seq = j
else:
# j is not an integer
- raise TypeError, "index must be a pair of integers or slices"
+ raise TypeError("index must be a pair of integers or slices")
# First see if 'value' is another dok_matrix of the appropriate
# dimensions
@@ -292,8 +292,8 @@ def __setitem__(self, key, value):
for element in seq:
self[i, element] = value[0, element]
else:
- raise NotImplementedError, "setting a 2-d slice of" \
- " a dok_matrix is not yet supported"
+ raise NotImplementedError("setting a 2-d slice of"
+ " a dok_matrix is not yet supported")
elif np.isscalar(value):
for element in seq:
self[i, element] = value
@@ -301,11 +301,11 @@ def __setitem__(self, key, value):
# See if value is a sequence
try:
if len(seq) != len(value):
- raise ValueError, "index and value ranges must have" \
- " the same length"
+ raise ValueError("index and value ranges must have"
+ " the same length")
except TypeError:
# Not a sequence
- raise TypeError, "unsupported type for dok_matrix.__setitem__"
+ raise TypeError("unsupported type for dok_matrix.__setitem__")
else:
for element, val in izip(seq, value):
self[i, element] = val
@@ -325,7 +325,7 @@ def __add__(self, other):
#new.dtype.char = self.dtype.char
elif isinstance(other, dok_matrix):
if other.shape != self.shape:
- raise ValueError, "matrix dimensions are not equal"
+ raise ValueError("matrix dimensions are not equal")
# We could alternatively set the dimensions to the the largest of
# the two matrices to be summed. Would this be a good idea?
new = dok_matrix(self.shape, dtype=self.dtype)
@@ -338,7 +338,7 @@ def __add__(self, other):
elif isdense(other):
new = self.todense() + other
else:
- raise TypeError, "data type not understood"
+ raise TypeError("data type not understood")
return new
def __radd__(self, other):
@@ -354,7 +354,7 @@ def __radd__(self, other):
new[i, j] = aij
elif isinstance(other, dok_matrix):
if other.shape != self.shape:
- raise ValueError, "matrix dimensions are not equal"
+ raise ValueError("matrix dimensions are not equal")
new = dok_matrix(self.shape, dtype=self.dtype)
new.update(self)
for key in other:
@@ -365,7 +365,7 @@ def __radd__(self, other):
elif isdense(other):
new = other + self.todense()
else:
- raise TypeError, "data type not understood"
+ raise TypeError("data type not understood")
return new
def __neg__(self):
@@ -534,8 +534,8 @@ def resize(self, shape):
non-zero elements that lie outside.
"""
if not isshape(shape):
- raise TypeError, "dimensions must be a 2-tuple of positive"\
- " integers"
+ raise TypeError("dimensions must be a 2-tuple of positive"
+ " integers")
newM, newN = shape
M, N = self.shape
if newM < M or newN < N:
scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py (4 changed lines)
@@ -83,7 +83,7 @@ def readMatrix( matrixName, options ):
try:
readMatrix = formatMap[options.format]
except:
- raise ValueError, 'unsupported format: %s' % options.format
+ raise ValueError('unsupported format: %s' % options.format)
print 'format:', options.format
@@ -180,7 +180,7 @@ def main():
try:
import pylab
except ImportError:
- raise ImportError, "could not import pylab"
+ raise ImportError("could not import pylab")
times = np.array( times )
print times
pylab.plot( times[:,0], 'b-o' )
scipy/sparse/linalg/dsolve/umfpack/umfpack.py (46 changed lines)
@@ -277,7 +277,7 @@ def __init__( self, family = 'di', **kwargs ):
Struct.__init__( self, **kwargs )
if family not in umfFamilyTypes.keys():
- raise TypeError, 'wrong family: %s' % family
+ raise TypeError('wrong family: %s' % family)
self.family = family
self.control = np.zeros( (UMFPACK_CONTROL, ), dtype = np.double )
@@ -328,25 +328,25 @@ def _getIndx( self, mtx ):
indx = mtx.indices
self.isCSR = 1
else:
- raise TypeError, 'must be a CSC/CSR matrix (is %s)' % mtx.__class__
+ raise TypeError('must be a CSC/CSR matrix (is %s)' % mtx.__class__)
##
# Should check types of indices to correspond to familyTypes.
if self.family[1] == 'i':
if (indx.dtype != np.dtype('i')) \
or mtx.indptr.dtype != np.dtype('i'):
- raise ValueError, 'matrix must have int indices'
+ raise ValueError('matrix must have int indices')
else:
if (indx.dtype != np.dtype('l')) \
or mtx.indptr.dtype != np.dtype('l'):
- raise ValueError, 'matrix must have long indices'
+ raise ValueError('matrix must have long indices')
if self.isReal:
if mtx.data.dtype != np.dtype('<f8'):
- raise ValueError, 'matrix must have float64 values'
+ raise ValueError('matrix must have float64 values')
else:
if mtx.data.dtype != np.dtype('<c16'):
- raise ValueError, 'matrix must have complex128 values'
+ raise ValueError('matrix must have complex128 values')
return indx
@@ -379,8 +379,8 @@ def symbolic( self, mtx ):
## print status, self._symbolic
if status != UMFPACK_OK:
- raise RuntimeError, '%s failed with %s' % (self.funs.symbolic,
- umfStatus[status])
+ raise RuntimeError('%s failed with %s' % (self.funs.symbolic,
+ umfStatus[status]))
self.mtx = mtx
@@ -432,8 +432,8 @@ def numeric( self, mtx ):
else:
break
if failCount >= 2:
- raise RuntimeError, '%s failed with %s' % (self.funs.numeric,
- umfStatus[status])
+ raise RuntimeError('%s failed with %s' % (self.funs.numeric,
+ umfStatus[status]))
##
# 14.12.2005, c
@@ -507,7 +507,7 @@ def solve( self, sys, mtx, rhs, autoTranspose = False ):
assumes CSC internally
"""
if sys not in umfSys:
- raise ValueError, 'sys must be in' % umfSys
+ raise ValueError('sys must be in' % umfSys)
if autoTranspose and self.isCSR:
##
@@ -517,13 +517,13 @@ def solve( self, sys, mtx, rhs, autoTranspose = False ):
if sys in umfSys_transposeMap[ii]:
sys = umfSys_transposeMap[ii][sys]
else:
- raise RuntimeError, 'autoTranspose ambiguous, switch it off'
+ raise RuntimeError('autoTranspose ambiguous, switch it off')
if self._numeric is not None:
if self.mtx is not mtx:
- raise ValueError, 'must be called with same matrix as numeric()'
+ raise ValueError('must be called with same matrix as numeric()')
else:
- raise RuntimeError, 'numeric() not called'
+ raise RuntimeError('numeric() not called')
indx = self._getIndx( mtx )
@@ -551,8 +551,8 @@ def solve( self, sys, mtx, rhs, autoTranspose = False ):
print 'zeroing nan and inf entries...'
sol[~np.isfinite( sol )] = 0.0
else:
- raise RuntimeError, '%s failed with %s' % (self.funs.solve,
- umfStatus[status])
+ raise RuntimeError('%s failed with %s' % (self.funs.solve,
+ umfStatus[status]))
econd = 1.0 / self.info[UMFPACK_RCOND]
if econd > self.maxCond:
print 'warning: (almost) singular matrix! '\
@@ -581,7 +581,7 @@ def linsolve( self, sys, mtx, rhs, autoTranspose = False ):
# print self.family
if sys not in umfSys:
- raise ValueError, 'sys must be in' % umfSys
+ raise ValueError('sys must be in' % umfSys)
if self._numeric is None:
self.numeric( mtx )
@@ -646,8 +646,8 @@ def lu( self, mtx ):
= self.funs.get_lunz( self._numeric )
if status != UMFPACK_OK:
- raise RuntimeError, '%s failed with %s' % (self.funs.get_lunz,
- umfStatus[status])
+ raise RuntimeError('%s failed with %s' % (self.funs.get_lunz,
+ umfStatus[status]))
#allocate storage for decomposition data
i_type = mtx.indptr.dtype
@@ -673,8 +673,8 @@ def lu( self, mtx ):
self._numeric )
if status != UMFPACK_OK:
- raise RuntimeError, '%s failed with %s'\
- % (self.funs.get_numeric, umfStatus[status])
+ raise RuntimeError('%s failed with %s'
+ % (self.funs.get_numeric, umfStatus[status]))
L = sp.csr_matrix((Lx,Lj,Lp),(n_row,min(n_row,n_col)))
U = sp.csc_matrix((Ux,Ui,Up),(min(n_row,n_col),n_col))
@@ -693,8 +693,8 @@ def lu( self, mtx ):
self._numeric)
if status != UMFPACK_OK:
- raise RuntimeError, '%s failed with %s'\
- % (self.funs.get_numeric, umfStatus[status])
+ raise RuntimeError('%s failed with %s'
+ % (self.funs.get_numeric, umfStatus[status]))
Lxz = np.zeros( (lnz,), dtype = np.complex128 )
scipy/sparse/linalg/isolve/utils.py (2 changed lines)
@@ -93,7 +93,7 @@ def postprocess(x):
xtype = b.dtype.char
else:
if xtype not in 'fdFD':
- raise ValueError, "xtype must be 'f', 'd', 'F', or 'D'"
+ raise ValueError("xtype must be 'f', 'd', 'F', or 'D'")
b = asarray(b,dtype=xtype) #make b the same type as x
b = b.ravel()
scipy/sparse/spfuncs.py (4 changed lines)
@@ -41,7 +41,7 @@ def estimate_blocksize(A,efficiency=0.7):
return (1,1)
if not 0 < efficiency < 1.0:
- raise ValueError,'efficiency must satisfy 0.0 < efficiency < 1.0'
+ raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0')
high_efficiency = (1.0 + efficiency) / 2.0
nnz = float(A.nnz)
@@ -85,7 +85,7 @@ def count_blocks(A,blocksize):
"""
r,c = blocksize
if r < 1 or c < 1:
- raise ValueError,'r and c must be positive'
+ raise ValueError('r and c must be positive')
if isspmatrix_csr(A):
M,N = A.shape
scipy/sparse/sputils.py (4 changed lines)
@@ -44,7 +44,7 @@ def upcast(*args):
if np.can_cast(sample.dtype,t):
return t
- raise TypeError,'no supported conversion for types: %s' % args
+ raise TypeError('no supported conversion for types: %s' % args)
def to_native(A):
@@ -68,7 +68,7 @@ def getdtype(dtype, a=None, default=None):
newdtype = np.dtype(default)
canCast = False
else:
- raise TypeError, "could not interpret data type"
+ raise TypeError("could not interpret data type")
else:
newdtype = np.dtype(dtype)