Browse files

Merge pull request #750 from carlobaldassi/linalg

Reorganization of linalg.jl in separate files
  • Loading branch information...
2 parents 9135075 + 022d924 commit bfd7bccc49b599c3ed3d38a5a6aa9472cce88b83 @JeffBezanson JeffBezanson committed Apr 24, 2012
Showing with 823 additions and 600 deletions.
  1. +61 −433 base/linalg.jl
  2. +1 −1 base/linalg_blas.jl
  3. +466 −0 base/linalg_dense.jl
  4. +1 −0 base/sysimg.jl
  5. +1 −86 extras/bitarray.jl
  6. +127 −0 extras/linalg_bitarray.jl
  7. +152 −0 extras/linalg_sparse.jl
  8. +7 −80 extras/sparse.jl
  9. +6 −0 test/bitarray.jl
  10. +1 −0 test/sparse.jl
View
494 base/linalg.jl
@@ -1,274 +1,61 @@
-## linalg.jl: Basic Linear Algebra functions ##
+## linalg.jl: Basic Linear Algebra interface specifications ##
+#
+# This file mostly contains commented functions which are supposed
+# to be defined in type-specific linalg_<type>.jl files.
+#
+# It only actually defines default argument versions which will
+# fail without a proper implementation anyway.
-aCb(x::AbstractVector, y::AbstractVector) = [dot(x, y)]
-aTb{T<:Real}(x::AbstractVector{T}, y::AbstractVector{T}) = [dot(x, y)]
-function dot(x::AbstractVector, y::AbstractVector)
- s = zero(eltype(x))
- for i=1:length(x)
- s += conj(x[i])*y[i]
- end
- s
-end
-
-cross(a::AbstractVector, b::AbstractVector) =
- [a[2]*b[3]-a[3]*b[2], a[3]*b[1]-a[1]*b[3], a[1]*b[2]-a[2]*b[1]]
-
-# linalg_blas.jl defines matmul for floats; other integer and mixed precision
-# cases are handled here
-
-# TODO: It will be faster for large matrices to convert to float,
-# call BLAS, and convert back to required type.
-
-# TODO: support transposed arguments
-function (*){T,S}(A::AbstractMatrix{T}, B::AbstractVector{S})
- mA = size(A, 1)
- mB = size(B, 1)
- C = zeros(promote_type(T,S), mA)
- for k = 1:mB
- b = B[k]
- for i = 1:mA
- C[i] += A[i, k] * b
- end
- end
- return C
-end
-
-function (*){T,S}(A::AbstractVector{S}, B::AbstractMatrix{T})
- return reshape(A,length(A),1)*B
-end
-
-# TODO: support transposed arguments
-function (*){T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S})
- (mA, nA) = size(A)
- (mB, nB) = size(B)
- if mA == 2 && nA == 2 && nB == 2; return matmul2x2('N','N',A,B); end
- if mA == 3 && nA == 3 && nB == 3; return matmul3x3('N','N',A,B); end
- C = zeros(promote_type(T,S), mA, nB)
- z = zero(eltype(C))
+#aCb(x::AbstractVector, y::AbstractVector)
+#aTb{T<:Real}(x::AbstractVector{T}, y::AbstractVector{T})
- for jb = 1:50:nB
- jlim = min(jb+50-1,nB)
- for ib = 1:50:mA
- ilim = min(ib+50-1,mA)
- for kb = 1:50:mB
- klim = min(kb+50-1,mB)
- for j=jb:jlim
- boffs = (j-1)*mB
- coffs = (j-1)*mA
- for i=ib:ilim
- s = z
- for k=kb:klim
- s += A[i,k] * B[boffs+k]
- end
- C[coffs+i] += s
- end
- end
- end
- end
- end
+#dot(x::AbstractVector, y::AbstractVector)
- return C
-end
-
-# multiply 2x2 matrices
-function matmul2x2{T,S}(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S})
- R = promote_type(T,S)
- C = Array(R, 2, 2)
-
- if tA == 'T'
- A11 = A[1,1]; A12 = A[2,1]; A21 = A[1,2]; A22 = A[2,2]
- elseif tA == 'C'
- A11 = conj(A[1,1]); A12 = conj(A[2,1]); A21 = conj(A[1,2]); A22 = conj(A[2,2])
- else
- A11 = A[1,1]; A12 = A[1,2]; A21 = A[2,1]; A22 = A[2,2]
- end
- if tB == 'T'
- B11 = B[1,1]; B12 = B[2,1]; B21 = B[1,2]; B22 = B[2,2]
- elseif tB == 'C'
- B11 = conj(B[1,1]); B12 = conj(B[2,1]); B21 = conj(B[1,2]); B22 = conj(B[2,2])
- else
- B11 = B[1,1]; B12 = B[1,2]; B21 = B[2,1]; B22 = B[2,2]
- end
-
- C[1,1] = A11*B11 + A12*B21
- C[1,2] = A11*B12 + A12*B22
- C[2,1] = A21*B11 + A22*B21
- C[2,2] = A21*B12 + A22*B22
-
- return C
-end
-
-function matmul3x3{T,S}(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S})
- R = promote_type(T,S)
- C = Array(R, 3, 3)
-
- if tA == 'T'
- A11 = A[1,1]; A12 = A[2,1]; A13 = A[3,1];
- A21 = A[1,2]; A22 = A[2,2]; A23 = A[3,2];
- A31 = A[1,3]; A32 = A[2,3]; A33 = A[3,3];
- elseif tA == 'C'
- A11 = conj(A[1,1]); A12 = conj(A[2,1]); A13 = conj(A[3,1]);
- A21 = conj(A[1,2]); A22 = conj(A[2,2]); A23 = conj(A[3,2]);
- A31 = conj(A[1,3]); A32 = conj(A[2,3]); A33 = conj(A[3,3]);
- else
- A11 = A[1,1]; A12 = A[1,2]; A13 = A[1,3];
- A21 = A[2,1]; A22 = A[2,2]; A23 = A[2,3];
- A31 = A[3,1]; A32 = A[3,2]; A33 = A[3,3];
- end
-
- if tB == 'T'
- B11 = B[1,1]; B12 = B[2,1]; B13 = B[3,1];
- B21 = B[1,2]; B22 = B[2,2]; B23 = B[3,2];
- B31 = B[1,3]; B32 = B[2,3]; B33 = B[3,3];
- elseif tB == 'C'
- B11 = conj(B[1,1]); B12 = conj(B[2,1]); B13 = conj(B[3,1]);
- B21 = conj(B[1,2]); B22 = conj(B[2,2]); B23 = conj(B[3,2]);
- B31 = conj(B[1,3]); B32 = conj(B[2,3]); B33 = conj(B[3,3]);
- else
- B11 = B[1,1]; B12 = B[1,2]; B13 = B[1,3];
- B21 = B[2,1]; B22 = B[2,2]; B23 = B[2,3];
- B31 = B[3,1]; B32 = B[3,2]; B33 = B[3,3];
- end
-
- C[1,1] = A11*B11 + A12*B21 + A13*B31
- C[1,2] = A11*B12 + A12*B22 + A13*B32
- C[1,3] = A11*B13 + A12*B23 + A13*B33
-
- C[2,1] = A21*B11 + A22*B21 + A23*B31
- C[2,2] = A21*B12 + A22*B22 + A23*B32
- C[2,3] = A21*B13 + A22*B23 + A23*B33
-
- C[3,1] = A31*B11 + A32*B21 + A33*B31
- C[3,2] = A31*B12 + A32*B22 + A33*B32
- C[3,3] = A31*B13 + A32*B23 + A33*B33
-
- return C
-end
-
-
-triu(M) = triu(M,0)
-tril(M) = tril(M,0)
-triu{T}(M::AbstractMatrix{T}, k) = [ j-i >= k ? M[i,j] : zero(T) |
- i=1:size(M,1), j=1:size(M,2) ]
-tril{T}(M::AbstractMatrix{T}, k) = [ j-i <= k ? M[i,j] : zero(T) |
- i=1:size(M,1), j=1:size(M,2) ]
-
-diff(a::Vector) = [ a[i+1] - a[i] | i=1:length(a)-1 ]
-
-function diff(a::Matrix, dim)
- if dim == 1
- [ a[i+1,j] - a[i,j] | i=1:size(a,1)-1, j=1:size(a,2) ]
- else
- [ a[i,j+1] - a[i,j] | i=1:size(a,1), j=1:size(a,2)-1 ]
- end
-end
-
-diff(a::Matrix) = diff(a, 1)
-
-gradient(F::Vector) = gradient(F, [1:length(F)])
-gradient(F::Vector, h::Real) = gradient(F, [h*(1:length(F))])
-function gradient(F::Vector, h::Vector)
- n = length(F)
- g = similar(F)
- if n > 0
- g[1] = 0
- end
- if n > 1
- g[1] = (F[2] - F[1]) / (h[2] - h[1])
- g[n] = (F[n] - F[n-1]) / (h[end] - h[end-1])
- end
- if n > 2
- h = h[3:n] - h[1:n-2]
- g[2:n-1] = (F[3:n] - F[1:n-2]) ./ h
- end
- return g
-end
-
-diag(A::Vector) = error("Perhaps you meant to use diagm().")
-diag(A::Matrix) = [ A[i,i] | i=1:min(size(A,1),size(A,2)) ]
-
-function diagm{T}(v::Union(Vector{T},Matrix{T}))
- if isa(v, Matrix)
- if (size(v,1) != 1 && size(v,2) != 1)
- error("Input should be nx1 or 1xn")
- end
- end
+#cross(a::AbstractVector, b::AbstractVector)
- n = numel(v)
- a = zeros(T, n, n)
- for i=1:n
- a[i,i] = v[i]
- end
+#(*){T,S}(A::AbstractMatrix{T}, B::AbstractVector{S})
+#(*){T,S}(A::AbstractVector{S}, B::AbstractMatrix{T})
+#(*){T,S}(A::AbstractMatrix{T}, B::AbstractMatrix{S})
- return a
-end
+triu(M::AbstractMatrix) = triu(M,0)
+tril(M::AbstractMatrix) = tril(M,0)
+#triu{T}(M::AbstractMatrix{T}, k::Integer)
+#tril{T}(M::AbstractMatrix{T}, k::Integer)
-function norm(x::Vector, p::Number)
- if p == Inf
- return max(abs(x))
- elseif p == -Inf
- return min(abs(x))
- else
- return sum(abs(x).^p).^(1/p)
- end
-end
+#diff(a::AbstractVector)
-norm(x::Vector) = sqrt(real(dot(x,x)))
+#diff(a::AbstractMatrix, dim::Integer)
+diff(a::AbstractMatrix) = diff(a, 1)
-function norm(A::Matrix, p)
- if size(A,1) == 1 || size(A,2) == 1
- return norm(reshape(A, numel(A)), p)
- elseif p == 1
- return max(sum(abs(A),1))
- elseif p == 2
- return max(svd(A)[2])
- elseif p == Inf
- max(sum(abs(A),2))
- elseif p == "fro"
- return sqrt(sum(diag(A'*A)))
- end
-end
+gradient(F::AbstractVector) = gradient(F, [1:length(F)])
+gradient(F::AbstractVector, h::Real) = gradient(F, [h*(1:length(F))])
+#gradient(F::AbstractVector, h::AbstractVector)
-norm(A::Matrix) = norm(A, 2)
-rank(A::Matrix, tol::Real) = sum(svd(A)[2] > tol)
-rank(A::Matrix) = rank(A, 0)
+diag(A::AbstractVector) = error("Perhaps you meant to use diagm().")
+#diag(A::AbstractMatrix)
-# trace(A::Matrix) = sum(diag(A))
+#diagm{T}(v::Union(AbstractVector{T},AbstractMatrix{T}))
-function trace{T}(A::Matrix{T})
- t = zero(T)
- for i=1:min(size(A))
- t += A[i,i]
- end
- return t
-end
+#norm(x::AbstractVector, p::Number)
+#norm(x::AbstractVector)
-kron(a::Vector, b::Vector) = [ a[i]*b[j] | i=1:length(a), j=1:length(b) ]
+#norm(A::AbstractMatrix, p)
+norm(A::AbstractMatrix) = norm(A, 2)
+rank(A::AbstractMatrix, tol::Real) = sum(svd(A)[2] > tol)
+rank(A::AbstractMatrix) = rank(A, 0)
-function kron{T,S}(a::Matrix{T}, b::Matrix{S})
- R = Array(promote_type(T,S), size(a,1)*size(b,1), size(a,2)*size(b,2))
+#trace{T}(A::AbstractMatrix{T})
- m = 1
- for j = 1:size(a,2)
- for l = 1:size(b,2)
- for i = 1:size(a,1)
- aij = a[i,j]
- for k = 1:size(b,1)
- R[m] = aij*b[k,l]
- m += 1
- end
- end
- end
- end
- R
-end
+#kron(a::AbstractVector, b::AbstractVector)
+#kron{T,S}(a::AbstractMatrix{T}, b::AbstractMatrix{S})
-det(a::Matrix) = prod(diag(qr(a)[2]))
-inv(a::Matrix) = a \ one(a)
-cond(a::Matrix, p) = norm(a, p) * norm(inv(a), p)
-cond(a::Matrix) = cond(a, 2)
+#det(a::AbstractMatrix)
+#inv(a::AbstractMatrix)
+#cond(a::AbstractMatrix, p)
+cond(a::AbstractMatrix) = cond(a, 2)
+#XXX this doesn't seem to belong here
function randsym(n)
a = randn(n,n)
for j=1:n-1, i=j+1:n
@@ -279,196 +66,37 @@ function randsym(n)
a
end
-function issym(A::Matrix)
- m, n = size(A)
- if m != n; error("matrix must be square, got $(m)x$(n)"); end
- for i = 1:(n-1), j = (i+1):n
- if A[i,j] != A[j,i]
- return false
- end
- end
- return true
-end
+#issym(A::AbstractMatrix)
# Randomized matrix symmetry test
-# Theorem: Matrix is symmetric iff x'*A*y == y'*A*x, for randomly chosen x and y.
-function issym_rnd(A::AbstractArray)
- m, n = size(A)
- if m != n; return false; end
- ntrials = 5
+# Theorem: A matrix is symmetric iff x'*A*y == y'*A*x, for randomly chosen x and y.
+#issym_rnd(A::AbstractArray)
- for i=1:ntrials
- x = randn(n)
- y = randn(n)
- if (x'*A*y - y'*A*x)[1] > 1e-6; return false; end
- end
-
- return true
-end
-
-function ishermitian(A::Matrix)
- m, n = size(A)
- if m != n; error("matrix must be square, got $(m)x$(n)"); end
- for i = 1:n, j = i:n
- if A[i,j] != conj(A[j,i])
- return false
- end
- end
- return true
-end
+#ishermitian(A::AbstractMatrix)
+#istriu(A::AbstractMatrix)
+#istril(A::AbstractMatrix)
-function istriu(A::Matrix)
- m, n = size(A)
- if m != n; error("matrix must be square, got $(m)x$(n)"); end
- for i = 1:n, j = 1:n
- if A[i,j] != 0 && j < i
- return false
- end
- end
- return true
-end
-
-function istril(A::Matrix)
- m, n = size(A)
- if m != n; error("matrix must be square, got $(m)x$(n)"); end
- for i = 1:n, j = n:-1:1
- if A[i,j] != 0 && j > i
- return false
- end
- end
- return true
-end
-
-function linreg(x, y)
- M = [ones(length(x)) x]
- Mt = M'
- ((Mt*M)\Mt)*y
-end
+# XXX needs type annotation
+#linreg(x, y)
# weighted least squares
-function linreg(x, y, w)
- w = sqrt(w)
- M = [w w.*x]
- Mt = M'
- ((Mt*M)\Mt)*(w.*y)
-end
+# XXX needs type annotation
+#linreg(x, y, w)
# multiply by diagonal matrix as vector
-function diagmm!(C::Matrix, A::Matrix, b::Vector)
- m, n = size(A)
- if n != length(b)
- error("argument dimensions do not match")
- end
- for j = 1:n
- bj = b[j]
- for i = 1:m
- C[i,j] = A[i,j]*bj
- end
- end
- return C
-end
-
-function diagmm!(C::Matrix, b::Vector, A::Matrix)
- m, n = size(A)
- if m != length(b)
- error("argument dimensions do not match")
- end
- for j=1:n
- for i=1:m
- C[i,j] = A[i,j]*b[i]
- end
- end
- return C
-end
-
-diagmm!(A::Matrix, b::Vector) = diagmm!(A,A,b)
-diagmm!(b::Vector, A::Matrix) = diagmm!(A,b,A)
+#diagmm!(C::AbstractMatrix, A::AbstractMatrix, b::AbstractVector)
-diagmm(A::Matrix, b::Vector) =
- diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), A, b)
-diagmm(b::Vector, A::Matrix) =
- diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), b, A)
+#diagmm!(C::AbstractMatrix, b::AbstractVector, A::AbstractMatrix)
-^(A::AbstractMatrix, p::Integer) = p < 0 ? inv(A^-p) : power_by_squaring(A,p)
+diagmm!(A::AbstractMatrix, b::AbstractVector) = diagmm!(A,A,b)
+diagmm!(b::AbstractVector, A::AbstractMatrix) = diagmm!(A,b,A)
-function ^(A::AbstractMatrix, p::Number)
- if integer_valued(p)
- ip = integer(real(p))
- if ip < 0
- return inv(power_by_squaring(A, -ip))
- else
- return power_by_squaring(A, ip)
- end
- end
- if size(A,1) != size(A,2)
- error("matrix must be square")
- end
- (v, X) = eig(A)
- if isreal(v) && any(v<0)
- v = complex(v)
- end
- if ishermitian(A)
- Xinv = X'
- else
- Xinv = inv(X)
- end
- diagmm(X, v.^p)*Xinv
-end
+#diagmm(A::AbstractMatrix, b::AbstractVector)
+#diagmm(b::AbstractVector, A::AbstractMatrix)
-function findmax(a)
- m = typemin(eltype(a))
- mi = 0
- for i=1:length(a)
- if a[i] > m
- m = a[i]
- mi = i
- end
- end
- return (m, mi)
-end
+#^(A::AbstractMatrix, p::Number)
-function findmin(a)
- m = typemax(eltype(a))
- mi = 0
- for i=1:length(a)
- if a[i] < m
- m = a[i]
- mi = i
- end
- end
- return (m, mi)
-end
+#findmax(a::AbstractArray)
+#findmin(a::AbstractArray)
-function rref{T}(A::Matrix{T})
- nr, nc = size(A)
- U = copy_to(similar(A,Float64), A)
- e = eps(norm(U,Inf))
- i = j = 1
- while i <= nr && j <= nc
- (m, mi) = findmax(abs(U[i:nr,j]))
- mi = mi+i - 1
- if m <= e
- U[i:nr,j] = 0
- j += 1
- else
- for k=j:nc
- U[i, k], U[mi, k] = U[mi, k], U[i, k]
- end
- d = U[i,j]
- for k = j:nc
- U[i,k] /= d
- end
- for k = 1:nr
- if k != i
- d = U[k,j]
- for l = j:nc
- U[k,l] -= d*U[i,l]
- end
- end
- end
- i += 1
- j += 1
- end
- end
- return U
-end
+#rref{T}(A::AbstractMatrix{T})
View
2 base/linalg_blas.jl
@@ -240,7 +240,7 @@ function (*){T<:Union(Float64,Float32,Complex128,Complex64)}(A::StridedMatrix{T}
if nA != mX; error("*: argument shapes do not match"); end
if stride(A, 1) != 1
- return invoke(*, (AbstractMatrix, AbstractVector), A, X)
+ return invoke(*, (Matrix, Vector), A, X)
end
# Result array does not need to be initialized as long as beta==0
View
466 base/linalg_dense.jl
@@ -0,0 +1,466 @@
+## linalg_dense.jl: Basic Linear Algebra functions for dense representations ##
+#
+# note that many functions have specific versions for Float/Complex arguments
+# which use BLAS instead
+
+aCb(x::Vector, y::Vector) = [dot(x, y)]
+aTb{T<:Real}(x::Vector{T}, y::Vector{T}) = [dot(x, y)]
+
+function dot(x::Vector, y::Vector)
+ s = zero(eltype(x))
+ for i=1:length(x)
+ s += conj(x[i])*y[i]
+ end
+ s
+end
+
+cross(a::Vector, b::Vector) =
+ [a[2]*b[3]-a[3]*b[2], a[3]*b[1]-a[1]*b[3], a[1]*b[2]-a[2]*b[1]]
+
+# linalg_blas.jl defines matmul for floats; other integer and mixed precision
+# cases are handled here
+
+# TODO: It will be faster for large matrices to convert to float,
+# call BLAS, and convert back to required type.
+
+# TODO: support transposed arguments
+function (*){T,S}(A::Matrix{T}, B::Vector{S})
+ mA = size(A, 1)
+ mB = size(B, 1)
+ C = zeros(promote_type(T,S), mA)
+ for k = 1:mB
+ b = B[k]
+ for i = 1:mA
+ C[i] += A[i, k] * b
+ end
+ end
+ return C
+end
+
+function (*){T,S}(A::Vector{S}, B::Matrix{T})
+ return reshape(A,length(A),1)*B
+end
+
+# TODO: support transposed arguments
+function (*){T,S}(A::Matrix{T}, B::Matrix{S})
+ (mA, nA) = size(A)
+ (mB, nB) = size(B)
+ if mA == 2 && nA == 2 && nB == 2; return matmul2x2('N','N',A,B); end
+ if mA == 3 && nA == 3 && nB == 3; return matmul3x3('N','N',A,B); end
+ C = zeros(promote_type(T,S), mA, nB)
+ z = zero(eltype(C))
+
+ for jb = 1:50:nB
+ jlim = min(jb+50-1,nB)
+ for ib = 1:50:mA
+ ilim = min(ib+50-1,mA)
+ for kb = 1:50:mB
+ klim = min(kb+50-1,mB)
+ for j=jb:jlim
+ boffs = (j-1)*mB
+ coffs = (j-1)*mA
+ for i=ib:ilim
+ s = z
+ for k=kb:klim
+ s += A[i,k] * B[boffs+k]
+ end
+ C[coffs+i] += s
+ end
+ end
+ end
+ end
+ end
+
+ return C
+end
+
+# multiply 2x2 matrices
+function matmul2x2{T,S}(tA, tB, A::Matrix{T}, B::Matrix{S})
+ R = promote_type(T,S)
+ C = Array(R, 2, 2)
+
+ if tA == 'T'
+ A11 = A[1,1]; A12 = A[2,1]; A21 = A[1,2]; A22 = A[2,2]
+ elseif tA == 'C'
+ A11 = conj(A[1,1]); A12 = conj(A[2,1]); A21 = conj(A[1,2]); A22 = conj(A[2,2])
+ else
+ A11 = A[1,1]; A12 = A[1,2]; A21 = A[2,1]; A22 = A[2,2]
+ end
+ if tB == 'T'
+ B11 = B[1,1]; B12 = B[2,1]; B21 = B[1,2]; B22 = B[2,2]
+ elseif tB == 'C'
+ B11 = conj(B[1,1]); B12 = conj(B[2,1]); B21 = conj(B[1,2]); B22 = conj(B[2,2])
+ else
+ B11 = B[1,1]; B12 = B[1,2]; B21 = B[2,1]; B22 = B[2,2]
+ end
+
+ C[1,1] = A11*B11 + A12*B21
+ C[1,2] = A11*B12 + A12*B22
+ C[2,1] = A21*B11 + A22*B21
+ C[2,2] = A21*B12 + A22*B22
+
+ return C
+end
+
+function matmul3x3{T,S}(tA, tB, A::Matrix{T}, B::Matrix{S})
+ R = promote_type(T,S)
+ C = Array(R, 3, 3)
+
+ if tA == 'T'
+ A11 = A[1,1]; A12 = A[2,1]; A13 = A[3,1];
+ A21 = A[1,2]; A22 = A[2,2]; A23 = A[3,2];
+ A31 = A[1,3]; A32 = A[2,3]; A33 = A[3,3];
+ elseif tA == 'C'
+ A11 = conj(A[1,1]); A12 = conj(A[2,1]); A13 = conj(A[3,1]);
+ A21 = conj(A[1,2]); A22 = conj(A[2,2]); A23 = conj(A[3,2]);
+ A31 = conj(A[1,3]); A32 = conj(A[2,3]); A33 = conj(A[3,3]);
+ else
+ A11 = A[1,1]; A12 = A[1,2]; A13 = A[1,3];
+ A21 = A[2,1]; A22 = A[2,2]; A23 = A[2,3];
+ A31 = A[3,1]; A32 = A[3,2]; A33 = A[3,3];
+ end
+
+ if tB == 'T'
+ B11 = B[1,1]; B12 = B[2,1]; B13 = B[3,1];
+ B21 = B[1,2]; B22 = B[2,2]; B23 = B[3,2];
+ B31 = B[1,3]; B32 = B[2,3]; B33 = B[3,3];
+ elseif tB == 'C'
+ B11 = conj(B[1,1]); B12 = conj(B[2,1]); B13 = conj(B[3,1]);
+ B21 = conj(B[1,2]); B22 = conj(B[2,2]); B23 = conj(B[3,2]);
+ B31 = conj(B[1,3]); B32 = conj(B[2,3]); B33 = conj(B[3,3]);
+ else
+ B11 = B[1,1]; B12 = B[1,2]; B13 = B[1,3];
+ B21 = B[2,1]; B22 = B[2,2]; B23 = B[2,3];
+ B31 = B[3,1]; B32 = B[3,2]; B33 = B[3,3];
+ end
+
+ C[1,1] = A11*B11 + A12*B21 + A13*B31
+ C[1,2] = A11*B12 + A12*B22 + A13*B32
+ C[1,3] = A11*B13 + A12*B23 + A13*B33
+
+ C[2,1] = A21*B11 + A22*B21 + A23*B31
+ C[2,2] = A21*B12 + A22*B22 + A23*B32
+ C[2,3] = A21*B13 + A22*B23 + A23*B33
+
+ C[3,1] = A31*B11 + A32*B21 + A33*B31
+ C[3,2] = A31*B12 + A32*B22 + A33*B32
+ C[3,3] = A31*B13 + A32*B23 + A33*B33
+
+ return C
+end
+
+
+triu{T}(M::Matrix{T}, k::Integer) = [ j-i >= k ? M[i,j] : zero(T) |
+ i=1:size(M,1), j=1:size(M,2) ]
+tril{T}(M::Matrix{T}, k::Integer) = [ j-i <= k ? M[i,j] : zero(T) |
+ i=1:size(M,1), j=1:size(M,2) ]
+
+diff(a::Vector) = [ a[i+1] - a[i] | i=1:length(a)-1 ]
+
+function diff(a::Matrix, dim::Integer)
+ if dim == 1
+ [ a[i+1,j] - a[i,j] | i=1:size(a,1)-1, j=1:size(a,2) ]
+ else
+ [ a[i,j+1] - a[i,j] | i=1:size(a,1), j=1:size(a,2)-1 ]
+ end
+end
+
+function gradient(F::Vector, h::Vector)
+ n = length(F)
+ g = similar(F)
+ if n > 0
+ g[1] = 0
+ end
+ if n > 1
+ g[1] = (F[2] - F[1]) / (h[2] - h[1])
+ g[n] = (F[n] - F[n-1]) / (h[end] - h[end-1])
+ end
+ if n > 2
+ h = h[3:n] - h[1:n-2]
+ g[2:n-1] = (F[3:n] - F[1:n-2]) ./ h
+ end
+ return g
+end
+
+diag(A::Matrix) = [ A[i,i] | i=1:min(size(A,1),size(A,2)) ]
+
+function diagm{T}(v::Union(Vector{T},Matrix{T}))
+ if isa(v, Matrix)
+ if (size(v,1) != 1 && size(v,2) != 1)
+ error("Input should be nx1 or 1xn")
+ end
+ end
+
+ n = numel(v)
+ a = zeros(T, n, n)
+ for i=1:n
+ a[i,i] = v[i]
+ end
+
+ return a
+end
+
+function norm(x::Vector, p::Number)
+ if p == Inf
+ return max(abs(x))
+ elseif p == -Inf
+ return min(abs(x))
+ else
+ return sum(abs(x).^p).^(1/p)
+ end
+end
+
+norm(x::Vector) = sqrt(real(dot(x,x)))
+
+function norm(A::Matrix, p)
+ if size(A,1) == 1 || size(A,2) == 1
+ return norm(reshape(A, numel(A)), p)
+ elseif p == 1
+ return max(sum(abs(A),1))
+ elseif p == 2
+ return max(svd(A)[2])
+ elseif p == Inf
+ max(sum(abs(A),2))
+ elseif p == "fro"
+ return sqrt(sum(diag(A'*A)))
+ end
+end
+
+norm(A::Matrix) = norm(A, 2)
+rank(A::Matrix, tol::Real) = sum(svd(A)[2] > tol)
+rank(A::Matrix) = rank(A, 0)
+
+# trace(A::Matrix) = sum(diag(A))
+
+function trace{T}(A::Matrix{T})
+ t = zero(T)
+ for i=1:min(size(A))
+ t += A[i,i]
+ end
+ return t
+end
+
+kron(a::Vector, b::Vector) = [ a[i]*b[j] | i=1:length(a), j=1:length(b) ]
+
+function kron{T,S}(a::Matrix{T}, b::Matrix{S})
+ R = Array(promote_type(T,S), size(a,1)*size(b,1), size(a,2)*size(b,2))
+
+ m = 1
+ for j = 1:size(a,2)
+ for l = 1:size(b,2)
+ for i = 1:size(a,1)
+ aij = a[i,j]
+ for k = 1:size(b,1)
+ R[m] = aij*b[k,l]
+ m += 1
+ end
+ end
+ end
+ end
+ R
+end
+
+det(a::Matrix) = prod(diag(qr(a)[2]))
+inv(a::Matrix) = a \ one(a)
+cond(a::Matrix, p) = norm(a, p) * norm(inv(a), p)
+cond(a::Matrix) = cond(a, 2)
+
+# XXX this was left in linalg.jl
+# it seems to belong somewhere else though
+#function randsym(n)
+
+function issym(A::Matrix)
+ m, n = size(A)
+ if m != n; error("matrix must be square, got $(m)x$(n)"); end
+ for i = 1:(n-1), j = (i+1):n
+ if A[i,j] != A[j,i]
+ return false
+ end
+ end
+ return true
+end
+
+# Randomized matrix symmetry test
+# Theorem: Matrix is symmetric iff x'*A*y == y'*A*x, for randomly chosen x and y.
+function issym_rnd(A::Array)
+ m, n = size(A)
+ if m != n; return false; end
+ ntrials = 5
+
+ for i=1:ntrials
+ x = randn(n)
+ y = randn(n)
+ if (x'*A*y - y'*A*x)[1] > 1e-6; return false; end
+ end
+
+ return true
+end
+
+function ishermitian(A::Matrix)
+ m, n = size(A)
+ if m != n; error("matrix must be square, got $(m)x$(n)"); end
+ for i = 1:n, j = i:n
+ if A[i,j] != conj(A[j,i])
+ return false
+ end
+ end
+ return true
+end
+
+function istriu(A::Matrix)
+ m, n = size(A)
+ if m != n; error("matrix must be square, got $(m)x$(n)"); end
+ for i = 1:n, j = 1:n
+ if A[i,j] != 0 && j < i
+ return false
+ end
+ end
+ return true
+end
+
+function istril(A::Matrix)
+ m, n = size(A)
+ if m != n; error("matrix must be square, got $(m)x$(n)"); end
+ for i = 1:n, j = n:-1:1
+ if A[i,j] != 0 && j > i
+ return false
+ end
+ end
+ return true
+end
+
+# XXX needs type annotation
+function linreg(x, y)
+ M = [ones(length(x)) x]
+ Mt = M'
+ ((Mt*M)\Mt)*y
+end
+
+# weighted least squares
+# XXX needs type annotation
+function linreg(x, y, w)
+ w = sqrt(w)
+ M = [w w.*x]
+ Mt = M'
+ ((Mt*M)\Mt)*(w.*y)
+end
+
+# multiply by diagonal matrix as vector
+function diagmm!(C::Matrix, A::Matrix, b::Vector)
+ m, n = size(A)
+ if n != length(b)
+ error("argument dimensions do not match")
+ end
+ for j = 1:n
+ bj = b[j]
+ for i = 1:m
+ C[i,j] = A[i,j]*bj
+ end
+ end
+ return C
+end
+
+function diagmm!(C::Matrix, b::Vector, A::Matrix)
+ m, n = size(A)
+ if m != length(b)
+ error("argument dimensions do not match")
+ end
+ for j=1:n
+ for i=1:m
+ C[i,j] = A[i,j]*b[i]
+ end
+ end
+ return C
+end
+
+diagmm!(A::Matrix, b::Vector) = diagmm!(A,A,b)
+diagmm!(b::Vector, A::Matrix) = diagmm!(A,b,A)
+
+diagmm(A::Matrix, b::Vector) =
+ diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), A, b)
+diagmm(b::Vector, A::Matrix) =
+ diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), b, A)
+
+^(A::Matrix, p::Integer) = p < 0 ? inv(A^-p) : power_by_squaring(A,p)
+
+function ^(A::Matrix, p::Number)
+ if integer_valued(p)
+ ip = integer(real(p))
+ if ip < 0
+ return inv(power_by_squaring(A, -ip))
+ else
+ return power_by_squaring(A, ip)
+ end
+ end
+ if size(A,1) != size(A,2)
+ error("matrix must be square")
+ end
+ (v, X) = eig(A)
+ if isreal(v) && any(v<0)
+ v = complex(v)
+ end
+ if ishermitian(A)
+ Xinv = X'
+ else
+ Xinv = inv(X)
+ end
+ diagmm(X, v.^p)*Xinv
+end
+
+function findmax(a::Array)
+ m = typemin(eltype(a))
+ mi = 0
+ for i=1:length(a)
+ if a[i] > m
+ m = a[i]
+ mi = i
+ end
+ end
+ return (m, mi)
+end
+
+function findmin(a::Array)
+ m = typemax(eltype(a))
+ mi = 0
+ for i=1:length(a)
+ if a[i] < m
+ m = a[i]
+ mi = i
+ end
+ end
+ return (m, mi)
+end
+
+function rref{T}(A::Matrix{T})
+ nr, nc = size(A)
+ U = copy_to(similar(A,Float64), A)
+ e = eps(norm(U,Inf))
+ i = j = 1
+ while i <= nr && j <= nc
+ (m, mi) = findmax(abs(U[i:nr,j]))
+ mi = mi+i - 1
+ if m <= e
+ U[i:nr,j] = 0
+ j += 1
+ else
+ for k=j:nc
+ U[i, k], U[mi, k] = U[mi, k], U[i, k]
+ end
+ d = U[i,j]
+ for k = j:nc
+ U[i,k] /= d
+ end
+ for k = 1:nr
+ if k != i
+ d = U[k,j]
+ for l = j:nc
+ U[k,l] -= d*U[i,l]
+ end
+ end
+ end
+ i += 1
+ j += 1
+ end
+ end
+ return U
+end
View
1 base/sysimg.jl
@@ -87,6 +87,7 @@ include("datafmt.jl")
# linear algebra
include("linalg.jl")
+include("linalg_dense.jl")
include("linalg_blas.jl")
include("linalg_lapack.jl")
View
87 extras/bitarray.jl
@@ -939,21 +939,7 @@ end
(.*)(x::Number, B::BitArray) = x .* bitunpack(B)
(.*)(A::BitArray, x::Number) = x .* A
-(*){T<:Integer}(A::BitArray{T}, B::BitArray{T}) = bitunpack(A) * bitunpack(B)
-
-#disambiguations
-(*){T<:Integer}(A::BitMatrix{T}, B::BitVector{T}) = bitunpack(A) * bitunpack(B)
-(*){T<:Integer}(A::BitVector{T}, B::BitMatrix{T}) = bitunpack(A) * bitunpack(B)
-(*){T<:Integer}(A::BitMatrix{T}, B::BitMatrix{T}) = bitunpack(A) * bitunpack(B)
-(*){T<:Integer}(A::BitMatrix{T}, B::AbstractVector) = (*)(bitunpack(A), B)
-(*){T<:Integer}(A::BitVector{T}, B::AbstractMatrix) = (*)(bitunpack(A), B)
-(*){T<:Integer}(A::BitMatrix{T}, B::AbstractMatrix) = (*)(bitunpack(A), B)
-(*){T<:Integer}(A::AbstractVector, B::BitMatrix{T}) = (*)(A, bitunpack(B))
-(*){T<:Integer}(A::AbstractMatrix, B::BitVector{T}) = (*)(A, bitunpack(B))
-(*){T<:Integer}(A::AbstractMatrix, B::BitMatrix{T}) = (*)(A, bitunpack(B))
-#end disambiguations
-
-for f in (:+, :-, :div, :mod, :./, :.^, :/, :\, :*, :.*, :&, :|, :$)
+for f in (:+, :-, :div, :mod, :./, :.^, :.*, :&, :|, :$)
@eval begin
($f)(A::BitArray, B::AbstractArray) = ($f)(bitunpack(A), B)
($f)(A::AbstractArray, B::BitArray) = ($f)(A, bitunpack(B))
@@ -984,16 +970,6 @@ function ($)(x::Number, B::BitArray{Bool})
end
(.*)(x::Number, B::BitArray{Bool}) = x & B
-#disambiguations (TODO: improve!)
-(*)(A::BitMatrix{Bool}, B::BitVector{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
-(*)(A::BitVector{Bool}, B::BitMatrix{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
-(*)(A::BitMatrix{Bool}, B::BitMatrix{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
-#end disambiguations
-
-# TODO: improve this!
-(*)(A::BitArray{Bool}, B::BitArray{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
-
-
## promotion to complex ##
# TODO?
@@ -1752,64 +1728,3 @@ function cumprod{T}(v::BitVector{T})
end
return c
end
-
-## Linear algebra
-
-function dot{T,S}(x::BitVector{T}, y::BitVector{S})
- # simplest way to mimic generic dot behavior
- s = zero(one(T) * one(S))
- for i = 1 : length(x.chunks)
- s += count_ones(x.chunks[i] & y.chunks[i])
- end
- return s
-end
-
-## slower than the unpacked version, which is MUCH slower
-# than blas'd (this one saves storage though, keeping it commented
-# just in case)
-#function aTb{T,S}(A::BitMatrix{T}, B::BitMatrix{S})
- #(mA, nA) = size(A)
- #(mB, nB) = size(B)
- #C = zeros(promote_type(T,S), nA, nB)
- #z = zero(eltype(C))
- #if mA != mB; error("*: argument shapes do not match"); end
- #if mA == 0; return C; end
- #col_ch = _jl_num_bit_chunks(mA)
- ## TODO: avoid using aux chunks and copy (?)
- #aux_chunksA = zeros(Uint64, col_ch)
- #aux_chunksB = [zeros(Uint64, col_ch) | j=1:nB]
- #for j = 1:nB
- #_jl_copy_chunks(aux_chunksB[j], 1, B.chunks, (j-1)*mA+1, mA)
- #end
- #for i = 1:nA
- #_jl_copy_chunks(aux_chunksA, 1, A.chunks, (i-1)*mA+1, mA)
- #for j = 1:nB
- #for k = 1:col_ch
- #C[i, j] += count_ones(aux_chunksA[k] & aux_chunksB[j][k])
- #end
- #end
- #end
- #return C
-#end
-
-#aCb{T, S}(A::BitMatrix{T}, B::BitMatrix{S}) = aTb(A, B)
-
-function triu{T}(B::BitMatrix{T}, k)
- m,n = size(B)
- A = bitzeros(T, m,n)
- for i = max(k+1,1):n
- j = clamp((i - 1) * m + 1, 1, i * m)
- _jl_copy_chunks(A.chunks, j, B.chunks, j, min(i-k, m))
- end
- return A
-end
-
-function tril{T}(B::BitMatrix{T}, k)
- m,n = size(B)
- A = bitzeros(T, m, n)
- for i = 1:min(n, m+k)
- j = clamp((i - 1) * m + i - k, 1, i * m)
- _jl_copy_chunks(A.chunks, j, B.chunks, j, max(m-i+k+1, 0))
- end
- return A
-end
View
127 extras/linalg_bitarray.jl
@@ -0,0 +1,127 @@
+## linalg_bitarray.jl: Basic Linear Algebra functions for BitArrays ##
+
+function dot{T,S}(x::BitVector{T}, y::BitVector{S})
+ # simplest way to mimic Array dot behavior
+ s = zero(one(T) * one(S))
+ for i = 1 : length(x.chunks)
+ s += count_ones(x.chunks[i] & y.chunks[i])
+ end
+ return s
+end
+
+## slower than the unpacked version, which is MUCH slower
+# than blas'd (this one saves storage though, keeping it commented
+# just in case)
+#function aTb{T,S}(A::BitMatrix{T}, B::BitMatrix{S})
+ #(mA, nA) = size(A)
+ #(mB, nB) = size(B)
+ #C = zeros(promote_type(T,S), nA, nB)
+ #z = zero(eltype(C))
+ #if mA != mB; error("*: argument shapes do not match"); end
+ #if mA == 0; return C; end
+ #col_ch = _jl_num_bit_chunks(mA)
+ ## TODO: avoid using aux chunks and copy (?)
+ #aux_chunksA = zeros(Uint64, col_ch)
+ #aux_chunksB = [zeros(Uint64, col_ch) | j=1:nB]
+ #for j = 1:nB
+ #_jl_copy_chunks(aux_chunksB[j], 1, B.chunks, (j-1)*mA+1, mA)
+ #end
+ #for i = 1:nA
+ #_jl_copy_chunks(aux_chunksA, 1, A.chunks, (i-1)*mA+1, mA)
+ #for j = 1:nB
+ #for k = 1:col_ch
+ #C[i, j] += count_ones(aux_chunksA[k] & aux_chunksB[j][k])
+ #end
+ #end
+ #end
+ #return C
+#end
+
+#aCb{T, S}(A::BitMatrix{T}, B::BitMatrix{S}) = aTb(A, B)
+
+function triu{T}(B::BitMatrix{T}, k::Int)
+ m,n = size(B)
+ A = bitzeros(T, m,n)
+ for i = max(k+1,1):n
+ j = clamp((i - 1) * m + 1, 1, i * m)
+ _jl_copy_chunks(A.chunks, j, B.chunks, j, min(i-k, m))
+ end
+ return A
+end
+triu(B::BitMatrix, k::Integer) = triu(B, int(k))
+
+function tril{T}(B::BitMatrix{T}, k::Int)
+ m,n = size(B)
+ A = bitzeros(T, m, n)
+ for i = 1:min(n, m+k)
+ j = clamp((i - 1) * m + i - k, 1, i * m)
+ _jl_copy_chunks(A.chunks, j, B.chunks, j, max(m-i+k+1, 0))
+ end
+ return A
+end
+tril(B::BitMatrix, k::Integer) = tril(B, int(k))
+
+## Matrix multiplication
+
+(*){T<:Integer}(A::BitArray{T}, B::BitArray{T}) = bitunpack(A) * bitunpack(B)
+
+#disambiguations
+(*){T<:Integer}(A::BitMatrix{T}, B::BitVector{T}) = bitunpack(A) * bitunpack(B)
+(*){T<:Integer}(A::BitVector{T}, B::BitMatrix{T}) = bitunpack(A) * bitunpack(B)
+(*){T<:Integer}(A::BitMatrix{T}, B::BitMatrix{T}) = bitunpack(A) * bitunpack(B)
+(*){T<:Integer}(A::BitMatrix{T}, B::AbstractVector) = (*)(bitunpack(A), B)
+(*){T<:Integer}(A::BitVector{T}, B::AbstractMatrix) = (*)(bitunpack(A), B)
+(*){T<:Integer}(A::BitMatrix{T}, B::AbstractMatrix) = (*)(bitunpack(A), B)
+(*){T<:Integer}(A::AbstractVector, B::BitMatrix{T}) = (*)(A, bitunpack(B))
+(*){T<:Integer}(A::AbstractMatrix, B::BitVector{T}) = (*)(A, bitunpack(B))
+(*){T<:Integer}(A::AbstractMatrix, B::BitMatrix{T}) = (*)(A, bitunpack(B))
+#end disambiguations
+
+for f in (:/, :\, :*)
+ @eval begin
+ ($f)(A::BitArray, B::AbstractArray) = ($f)(bitunpack(A), B)
+ ($f)(A::AbstractArray, B::BitArray) = ($f)(A, bitunpack(B))
+ end
+end
+
+# specialized Bool versions
+
+#disambiguations (TODO: improve!)
+(*)(A::BitMatrix{Bool}, B::BitVector{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
+(*)(A::BitVector{Bool}, B::BitMatrix{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
+(*)(A::BitMatrix{Bool}, B::BitMatrix{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
+#end disambiguations
+
+# TODO: improve this!
+(*)(A::BitArray{Bool}, B::BitArray{Bool}) = bitpack(bitunpack(A) * bitunpack(B))
+
+## diff and gradient
+
+# TODO: this could be improved (is it worth it?)
+gradient(F::BitVector) = gradient(bitunpack(F))
+gradient(F::BitVector, h::Real) = gradient(bitunpack(F), h)
+gradient(F::Vector, h::BitVector) = gradient(F, bitunpack(h))
+gradient(F::BitVector, h::Vector) = gradient(bitunpack(F), h)
+gradient(F::BitVector, h::BitVector) = gradient(bitunpack(F), bitunpack(h))
+
+## diag and related
+
+function diagm{T}(v::Union(BitVector{T},BitMatrix{T}))
+ if isa(v, BitMatrix)
+ if (size(v,1) != 1 && size(v,2) != 1)
+ error("Input should be nx1 or 1xn")
+ end
+ end
+
+ n = numel(v)
+ a = bitzeros(T, n, n)
+ for i=1:n
+ a[i,i] = v[i]
+ end
+
+ return a
+end
+
+## norm and rank
+
+svd(A::BitMatrix) = svd(float(A))
View
152 extras/linalg_sparse.jl
@@ -0,0 +1,152 @@
+## linalg_sparse.jl: Basic Linear Algebra functions for sparse representations ##
+
+#TODO? probably there's no use at all for these
+# dot(x::SparseAccumulator, y::SparseAccumulator)
+# cross(a::SparseAccumulator, b::SparseAccumulator) =
+
+
+## Matrix multiplication
+
+# In matrix-vector multiplication, the correct orientation of the vector is assumed: no dimension check is performed (X is indexed as a length-A.n column vector).
+function (*){T1,T2}(A::SparseMatrixCSC{T1}, X::Vector{T2})
+ Y = zeros(promote_type(T1,T2), A.m)
+ for col = 1 : A.n, k = A.colptr[col] : (A.colptr[col+1]-1)
+ Y[A.rowval[k]] += A.nzval[k] * X[col]
+ end
+ return Y
+end
+
+# In vector-matrix multiplication, the correct orientation of the vector is assumed.
+# XXX: this is wrong (i.e. not what Arrays would do): the column Vector X is silently treated as a row vector, yielding a length-A.n result instead of a dimension-mismatch error.
+function (*){T1,T2}(X::Vector{T1}, A::SparseMatrixCSC{T2})
+ Y = zeros(promote_type(T1,T2), A.n)
+ for col = 1 : A.n, k = A.colptr[col] : (A.colptr[col+1]-1)
+ Y[col] += X[A.rowval[k]] * A.nzval[k]
+ end
+ return Y
+end
+
+function (*){T1,T2}(A::SparseMatrixCSC{T1}, X::Matrix{T2})
+ mX, nX = size(X)
+ if A.n != mX; error("error in *: mismatched dimensions"); end
+ Y = zeros(promote_type(T1,T2), A.m, nX)
+ for multivec_col = 1:nX
+ for col = 1 : A.n
+ for k = A.colptr[col] : (A.colptr[col+1]-1)
+ Y[A.rowval[k], multivec_col] += A.nzval[k] * X[col, multivec_col]
+ end
+ end
+ end
+ return Y
+end
+
+function (*){T1,T2}(X::Matrix{T1}, A::SparseMatrixCSC{T2})
+ mX, nX = size(X)
+ if nX != A.m; error("error in *: mismatched dimensions"); end
+ Y = zeros(promote_type(T1,T2), mX, A.n)
+ for multivec_row = 1:mX
+ for col = 1 : A.n
+ for k = A.colptr[col] : (A.colptr[col+1]-1)
+ Y[multivec_row, col] += X[multivec_row, A.rowval[k]] * A.nzval[k]
+ end
+ end
+ end
+ return Y
+end
+
+# sparse matmul (sparse * sparse)
+function (*){TvX,TiX,TvY,TiY}(X::SparseMatrixCSC{TvX,TiX}, Y::SparseMatrixCSC{TvY,TiY})
+ mX, nX = size(X)
+ mY, nY = size(Y)
+ if nX != mY; error("error in *: mismatched dimensions"); end
+ Tv = promote_type(TvX, TvY)
+ Ti = promote_type(TiX, TiY)
+
+ colptr = Array(Ti, nY+1)
+ colptr[1] = 1
+ nnz_res = nnz(X) + nnz(Y)
+ rowval = Array(Ti, nnz_res) # TODO: Need better estimation of result space
+ nzval = Array(Tv, nnz_res)
+
+ colptrY = Y.colptr
+ rowvalY = Y.rowval
+ nzvalY = Y.nzval
+
+ spa = SparseAccumulator(Tv, Ti, mX);
+ for y_col = 1:nY
+ for y_elt = colptrY[y_col] : (colptrY[y_col+1]-1)
+ x_col = rowvalY[y_elt]
+ _jl_spa_axpy(spa, nzvalY[y_elt], X, x_col)
+ end
+ (rowval, nzval) = _jl_spa_store_reset(spa, y_col, colptr, rowval, nzval)
+ end
+
+ SparseMatrixCSC(mX, nY, colptr, rowval, nzval)
+end
+
+## triu, tril
+
+function triu{Tv,Ti}(S::SparseMatrixCSC{Tv,Ti}, k::Int)
+ m,n = size(S)
+ colptr = Array(Ti, n+1)
+ nnz = 0
+ for col = 1 : min(max(k+1,1), n+1)
+ colptr[col] = 1
+ end
+ for col = max(k+1,1) : n
+ for c1 = S.colptr[col] : S.colptr[col+1]-1
+ if S.rowval[c1] > col - k
+ break;
+ end
+ nnz += 1
+ end
+ colptr[col+1] = nnz+1
+ end
+ rowval = Array(Ti, nnz)
+ nzval = Array(Tv, nnz)
+ A = SparseMatrixCSC{Tv,Ti}(m, n, colptr, rowval, nzval)
+ for col = max(k+1,1) : S.n
+ c1 = S.colptr[col]
+ for c2 = A.colptr[col] : A.colptr[col+1]-1
+ A.rowval[c2] = S.rowval[c1]
+ A.nzval[c2] = S.nzval[c1]
+ c1 += 1
+ end
+ end
+ return A
+end
+triu{Tv,Ti}(S::SparseMatrixCSC{Tv,Ti}, k::Integer) = triu(S, int(k))
+
+function tril{Tv,Ti}(S::SparseMatrixCSC{Tv,Ti}, k::Int)
+ m,n = size(S)
+ colptr = Array(Ti, n+1)
+ nnz = 0
+ colptr[1] = 1
+ for col = 1 : min(n, m+k)
+ l1 = S.colptr[col+1]-1
+ for c1 = 0 : l1 - S.colptr[col]
+ if S.rowval[l1 - c1] < col - k
+ break;
+ end
+ nnz += 1
+ end
+ colptr[col+1] = nnz+1
+ end
+ for col = max(min(n, m+k)+2,1) : n+1
+ colptr[col] = nnz+1
+ end
+ rowval = Array(Ti, nnz)
+ nzval = Array(Tv, nnz)
+ A = SparseMatrixCSC{Tv,Ti}(m, n, colptr, rowval, nzval)
+ for col = 1 : min(n, m+k)
+ c1 = S.colptr[col+1]-1
+ l2 = A.colptr[col+1]-1
+ for c2 = 0 : l2 - A.colptr[col]
+ A.rowval[l2 - c2] = S.rowval[c1]
+ A.nzval[l2 - c2] = S.nzval[c1]
+ c1 -= 1
+ end
+ end
+ return A
+end
+tril{Tv,Ti}(S::SparseMatrixCSC{Tv,Ti}, k::Integer) = tril(S, int(k))
View
87 extras/sparse.jl
@@ -210,6 +210,11 @@ function speye(T::Type, m::Int, n::Int)
_jl_make_sparse(L, L, ones(T, x), m, n, (a,b)->a)
end
+function one{T}(S::SparseMatrixCSC{T})
+ m, n = size(S)
+ return speye(T, m, n)
+end
+
## Structure query functions
issym(A::SparseMatrixCSC) = nnz(A - A.') == 0 #'
@@ -448,7 +453,7 @@ ref(A::SparseMatrixCSC, i::Integer) = ref(A, ind2sub(size(A),i))
ref(A::SparseMatrixCSC, I::(Integer,Integer)) = ref(A, I[1], I[2])
function ref{T}(A::SparseMatrixCSC{T}, i0::Integer, i1::Integer)
- if i0 < 1 || i0 > A.m || i1 < 1 || i1 > A.n; error("ref: index out of bounds"); end
+ if !(1 <= i0 <= A.m && 1 <= i1 <= A.n); error("ref: index out of bounds"); end
first = A.colptr[i1]
last = A.colptr[i1+1]-1
while first <= last
@@ -506,7 +511,7 @@ assign(A::SparseMatrixCSC, v, I::(Integer,Integer)) = assign(A, v, I[1], I[2])
function assign{T,T_int}(A::SparseMatrixCSC{T,T_int}, v, i0::Integer, i1::Integer)
i0 = convert(T_int, i0)
i1 = convert(T_int, i1)
- if i0 < 1 || i0 > A.m || i1 < 1 || i1 > A.n; error("assign: index out of bounds"); end
+ if !(1 <= i0 <= A.m && 1 <= i1 <= A.n); error("assign: index out of bounds"); end
v = convert(T, v)
if v == 0 #either do nothing or delete entry if it exists
first = A.colptr[i1]
@@ -667,84 +672,6 @@ function assign{Tv,Ti}(A::SparseMatrixCSC{Tv,Ti}, v::AbstractMatrix, I::Abstract
return A
end
-## Matrix multiplication
-
-# In matrix-vector multiplication, the correct orientation of the vector is assumed.
-function (*){T1,T2}(A::SparseMatrixCSC{T1}, X::Vector{T2})
- Y = zeros(promote_type(T1,T2), A.m)
- for col = 1 : A.n, k = A.colptr[col] : (A.colptr[col+1]-1)
- Y[A.rowval[k]] += A.nzval[k] * X[col]
- end
- return Y
-end
-
-# In vector-matrix multiplication, the correct orientation of the vector is assumed.
-function (*){T1,T2}(X::Vector{T1}, A::SparseMatrixCSC{T2})
- Y = zeros(promote_type(T1,T2), A.n)
- for col = 1 : A.n, k = A.colptr[col] : (A.colptr[col+1]-1)
- Y[col] += X[A.rowval[k]] * A.nzval[k]
- end
- return Y
-end
-
-function (*){T1,T2}(A::SparseMatrixCSC{T1}, X::Matrix{T2})
- mX, nX = size(X)
- if A.n != mX; error("error in *: mismatched dimensions"); end
- Y = zeros(promote_type(T1,T2), A.m, nX)
- for multivec_col = 1:nX
- for col = 1 : A.n
- for k = A.colptr[col] : (A.colptr[col+1]-1)
- Y[A.rowval[k], multivec_col] += A.nzval[k] * X[col, multivec_col]
- end
- end
- end
- return Y
-end
-
-function (*){T1,T2}(X::Matrix{T1}, A::SparseMatrixCSC{T2})
- mX, nX = size(X)
- if nX != A.m; error("error in *: mismatched dimensions"); end
- Y = zeros(promote_type(T1,T2), mX, A.n)
- for multivec_row = 1:mX
- for col = 1 : A.n
- for k = A.colptr[col] : (A.colptr[col+1]-1)
- Y[multivec_row, col] += X[multivec_row, A.rowval[k]] * A.nzval[k]
- end
- end
- end
- return Y
-end
-
-# sparse matmul (sparse * sparse)
-function (*){TvX,TiX,TvY,TiY}(X::SparseMatrixCSC{TvX,TiX}, Y::SparseMatrixCSC{TvY,TiY})
- mX, nX = size(X)
- mY, nY = size(Y)
- if nX != mY; error("error in *: mismatched dimensions"); end
- Tv = promote_type(TvX, TvY)
- Ti = promote_type(TiX, TiY)
-
- colptr = Array(Ti, nY+1)
- colptr[1] = 1
- nnz_res = nnz(X) + nnz(Y)
- rowval = Array(Ti, nnz_res) # TODO: Need better estimation of result space
- nzval = Array(Tv, nnz_res)
-
- colptrY = Y.colptr
- rowvalY = Y.rowval
- nzvalY = Y.nzval
-
- spa = SparseAccumulator(Tv, Ti, mX);
- for y_col = 1:nY
- for y_elt = colptrY[y_col] : (colptrY[y_col+1]-1)
- x_col = rowvalY[y_elt]
- _jl_spa_axpy(spa, nzvalY[y_elt], X, x_col)
- end
- (rowval, nzval) = _jl_spa_store_reset(spa, y_col, colptr, rowval, nzval)
- end
-
- SparseMatrixCSC(mX, nY, colptr, rowval, nzval)
-end
-
# Sparse concatenation
function vcat(X::SparseMatrixCSC...)
View
6 test/bitarray.jl
@@ -1,4 +1,5 @@
load("../extras/bitarray.jl")
+load("../extras/linalg_bitarray.jl")
macro check_bit_operation(func, RetT, args)
quote
@@ -424,4 +425,9 @@ for k = -max(n1,n2) : max(n1,n2)
@check_bit_operation triu BitArray{T} (b1, k)
end
+#b1 = bitrand(T, v1)
+#@check_bit_operation diff Array{S} (b1,)
+#b1 = bitrand(T, n1, n2)
+#@check_bit_operation diff Array{S} (b1,)
+
@ timesofar "linalg"
View
1 test/sparse.jl
@@ -1,4 +1,5 @@
include("../extras/sparse.jl")
+include("../extras/linalg_sparse.jl")
include("../extras/linalg_suitesparse.jl")

0 comments on commit bfd7bcc

Please sign in to comment.