RowVector, pinv, and quasi-division (#23067)
* Make pinv(::AbstractVector) return a RowVector

Also broaden from StridedVector to AbstractVector while at it and don't
employ full matrix SVD.

* Add pinv(::RowVector)

* Add /(::Number, ::AbstractVector)

Also start testing consistency between division and multiplication with
pseudo-inverse involving vectors.

* Add \(::RowVector, ::RowVector), returning a matrix

* Fix \ for AbstractVector LHS

Let \(::AbstractVector, ::AbstractMatrix) return a RowVector and
\(::AbstractVector, ::AbstractVector) return a scalar.
martinholters authored and andreasnoack committed Aug 22, 2017
1 parent 52543df commit f1d3556
Showing 5 changed files with 73 additions and 3 deletions.
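
A minimal usage sketch of the behaviors described in the commit message above (not part of the diff; it assumes a Julia build that includes this change):

v = [1.0, 2.0]

pinv(v)           # now a 1×2 RowVector, [0.2 0.4], rather than a 1×2 Matrix
pinv(v')          # pinv of a RowVector is a plain Vector, [0.2, 0.4]
3.0 / v           # new /(::Number, ::AbstractVector): 3.0 * pinv(v) == [0.6 1.2]
v \ [2.0, 4.0]    # \(::AbstractVector, ::AbstractVector) gives the scalar 2.0
v \ eye(2)        # \(::AbstractVector, ::AbstractMatrix) gives the RowVector [0.2 0.4]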
1 change: 0 additions & 1 deletion base/linalg/dense.jl
@@ -895,7 +895,6 @@ function pinv(A::StridedMatrix{T}) where T
tol = eps(real(float(one(T))))*maximum(size(A))
return pinv(A, tol)
end
pinv(a::StridedVector) = pinv(reshape(a, length(a), 1))
function pinv(x::Number)
xi = inv(x)
return ifelse(isfinite(xi), xi, zero(xi))
16 changes: 15 additions & 1 deletion base/linalg/generic.jl
@@ -794,6 +794,19 @@ function inv(A::AbstractMatrix{T}) where T
A_ldiv_B!(factorize(convert(AbstractMatrix{S}, A)), eye(S0, checksquare(A)))
end

function pinv(v::AbstractVector{T}, tol::Real=real(zero(T))) where T
res = similar(v, typeof(zero(T) / (abs2(one(T)) + abs2(one(T)))))'
den = sum(abs2, v)
# as tol is the threshold relative to the maximum singular value, for a vector with
# single singular value σ=√den, σ ≦ tol*σ is equivalent to den=0 ∨ tol≥1
if iszero(den) || tol >= one(tol)
fill!(res, zero(eltype(res)))
else
res .= v' ./ den
end
return res
end
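# A quick numeric check (not part of the diff): for a nonzero v the method above reduces
# to v' / sum(abs2, v), so the Moore-Penrose identity pinv(v) * v == 1 holds.
let v = [3.0, 4.0]                  # sum(abs2, v) == 25
    @assert pinv(v) ≈ [0.12 0.16]   # a 1×2 RowVector
    @assert pinv(v) * v ≈ 1.0       # RowVector * Vector is a scalar
end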

"""
\\(A, B)
@@ -841,10 +854,11 @@ function (\)(A::AbstractMatrix, B::AbstractVecOrMat)
return qrfact(A,Val(true)) \ B
end

(\)(a::AbstractVector, b::AbstractArray) = reshape(a, length(a), 1) \ b
(\)(a::AbstractVector, b::AbstractArray) = pinv(a) * b
(/)(A::AbstractVecOrMat, B::AbstractVecOrMat) = (B' \ A')'
# \(A::StridedMatrix,x::Number) = inv(A)*x Should be added at some point when the old elementwise version has been deprecated long enough
# /(x::Number,A::StridedMatrix) = x*inv(A)
/(x::Number, v::AbstractVector) = x*pinv(v)

cond(x::Number) = x == 0 ? Inf : 1.0
cond(x::Number, p) = cond(x)
4 changes: 4 additions & 0 deletions base/linalg/rowvector.jl
@@ -227,8 +227,12 @@ Ac_mul_B(::RowVector, ::AbstractVector) = throw(DimensionMismatch("Cannot multip
Ac_mul_B(vec::AbstractVector, rowvec::RowVector) = throw(DimensionMismatch("Cannot multiply two transposed vectors"))
@inline Ac_mul_B(vec1::AbstractVector, vec2::AbstractVector) = adjoint(vec1)*vec2

# Pseudo-inverse
pinv(v::RowVector, tol::Real=0) = pinv(v', tol)'
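# Not part of the diff: pinv of a RowVector round-trips through the vector method, so the
# result is a plain Vector, e.g.
@assert pinv([3.0, 4.0]') ≈ [0.12, 0.16]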

# Left Division #

\(rowvec1::RowVector, rowvec2::RowVector) = pinv(rowvec1) * rowvec2
\(mat::AbstractMatrix, rowvec::RowVector) = throw(DimensionMismatch("Cannot left-divide transposed vector by matrix"))
At_ldiv_B(mat::AbstractMatrix, rowvec::RowVector) = throw(DimensionMismatch("Cannot left-divide transposed vector by matrix"))
Ac_ldiv_B(mat::AbstractMatrix, rowvec::RowVector) = throw(DimensionMismatch("Cannot left-divide transposed vector by matrix"))
39 changes: 38 additions & 1 deletion test/linalg/dense.jl
@@ -76,7 +76,7 @@ bimg = randn(n,2)/2
@test_throws DimensionMismatch b'\b
@test_throws DimensionMismatch b\b'
@test norm(a*x - b, 1)/norm(b) < ε*κ*n*2 # Ad hoc, revisit!
@test zeros(eltya,n)\ones(eltya,n) ≈ zeros(eltya,n,1)\ones(eltya,n,1)
@test zeros(eltya,n)\ones(eltya,n) ≈ (zeros(eltya,n,1)\ones(eltya,n,1))[1,1]
end

@testset "Test nullspace" begin
@@ -613,6 +613,43 @@ end
end
end

function test_rdiv_pinv_consistency(a, b)
@test (a*b)/b ≈ a*(b/b) ≈ (a*b)*pinv(b) ≈ a*(b*pinv(b))
@test typeof((a*b)/b) == typeof(a*(b/b)) == typeof((a*b)*pinv(b)) == typeof(a*(b*pinv(b)))
end
function test_ldiv_pinv_consistency(a, b)
@test a\(a*b) ≈ (a\a)*b ≈ (pinv(a)*a)*b ≈ pinv(a)*(a*b)
@test typeof(a\(a*b)) == typeof((a\a)*b) == typeof((pinv(a)*a)*b) == typeof(pinv(a)*(a*b))
end
function test_div_pinv_consistency(a, b)
test_rdiv_pinv_consistency(a, b)
test_ldiv_pinv_consistency(a, b)
end
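# Not part of the diff: these helpers assert that quasi-division agrees with explicit use
# of pinv in both value and return type; e.g. for a RowVector r and Vector c, (r*c)/c,
# r*(c/c), (r*c)*pinv(c) and r*(c*pinv(c)) must all be (approximately) the same RowVector.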

@testset "/ and \\ consistency with pinv for vectors" begin
@testset "Tests for type $elty" for elty in (Float32, Float64, Complex64, Complex128)
c = rand(elty, 5)
r = rand(elty, 5)'
cm = rand(elty, 5, 1)
rm = rand(elty, 1, 5)
@testset "inner prodcuts" begin
test_div_pinv_consistency(r, c)
test_div_pinv_consistency(rm, c)
test_div_pinv_consistency(r, cm)
test_div_pinv_consistency(rm, cm)
end
@testset "outer prodcuts" begin
test_div_pinv_consistency(c, r)
test_div_pinv_consistency(cm, rm)
end
@testset "matrix/vector" begin
m = rand(5, 5)
test_ldiv_pinv_consistency(m, c)
test_rdiv_pinv_consistency(r, m)
end
end
end

@testset "test ops on Numbers for $elty" for elty in [Float32,Float64,Complex64,Complex128]
a = rand(elty)
@test expm(a) == exp(a)
16 changes: 16 additions & 0 deletions test/linalg/pinv.jl
@@ -129,6 +129,18 @@ end
a = onediag_sparse(eltya, m)
test_pinv(a, m, m, default_tol, default_tol, default_tol)
end
@testset "Vector" begin
a = rand(eltya, m)
apinv = @inferred pinv(a)
@test pinv(hcat(a)) ≈ apinv
@test apinv isa RowVector{eltya}
end
@testset "RowVector" begin
a = rand(eltya, m)'
apinv = @inferred pinv(a)
@test pinv(vcat(a)) ≈ apinv
@test apinv isa Vector{eltya}
end
end
end

@@ -141,6 +153,10 @@ end
@test a[1] ≈ 0.0
@test a[2] ≈ 0.0

a = pinv([zero(eltya); zero(eltya)]')
@test a[1] ≈ 0.0
@test a[2] ≈ 0.0

a = pinv(Diagonal([zero(eltya); zero(eltya)]))
@test a.diag[1] ≈ 0.0
@test a.diag[2] ≈ 0.0

5 comments on commit f1d3556

@nanosoldier
Collaborator

Executing the daily benchmark build, I will reply here when finished:

@nanosoldier runbenchmarks(ALL, isdaily = true)

@nanosoldier
Collaborator

Your benchmark job has completed - possible performance regressions were detected. A full report can be found here. cc @ararslan

@fredrikekre
Member

I guess BaseBenchmarks.jl using ctranspose (now deprecated) is the reason for the sparse transpose regression.

@andreasnoack
Member

Yes. I think that is the case. Maybe we can update BaseBenchmarks to use ' instead.

@fredrikekre
Member

commented on f1d3556 Aug 22, 2017

JuliaCI/BaseBenchmarks.jl#102 (' would be smart, but does not work for the in-place adjoint)
