Commit

add new vecnorm(x, p), generalizing and replacing normfro; also fix #…
stevengj committed Mar 5, 2014
1 parent c70109f commit da747c2
Showing 16 changed files with 133 additions and 70 deletions.
9 changes: 8 additions & 1 deletion NEWS.md
@@ -143,7 +143,14 @@ Library improvements

   * `condskeel` for Skeel condition numbers ([#5726]).

-  * norm(Matrix) no longer calculates vector norm when first dimension is one ([#5545]).
+  * `norm(::Matrix)` no longer calculates a vector norm when the first
+    dimension is one ([#5545]); it always uses the operator (induced)
+    matrix norm.
+
+  * New `vecnorm(itr, p=2)` function that computes the norm of
+    any iterable collection of numbers as if it were a vector of
+    the same length. This generalizes and replaces `normfro` ([#6057]),
+    and `norm` is now type-stable ([#6056]).

   * Sparse linear algebra

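The NEWS entry above can be illustrated with a short session (illustrative only, not part of the commit; the outputs are what the new definitions are expected to produce):

    julia> vecnorm([3.0 0.0; 0.0 4.0])   # Frobenius norm of a matrix
    5.0

    julia> vecnorm(1:3, 1)               # any iterable collection of numbers
    6.0

    julia> norm([3.0, 4.0])              # vector norms are unchanged
    5.0
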
1 change: 1 addition & 0 deletions base/deprecated.jl
@@ -400,6 +400,7 @@ eval(Sys, :(@deprecate shlib_list dllist))
 IntSet(xs::Integer...) = (s=IntSet(); for a in xs; push!(s,a); end; s)
 Set{T<:Number}(xs::T...) = Set{T}(xs)

+@deprecate normfro(A) vecnorm(A)

 # 0.3 discontinued functions

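For reference, `@deprecate` keeps the old name working while steering callers to the new one; something like the following is the expected behavior (illustrative, not from the commit):

    julia> normfro(eye(2))   # prints a deprecation warning, then calls vecnorm
    1.4142135623730951
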
2 changes: 1 addition & 1 deletion base/exports.jl
@@ -627,7 +627,6 @@ export
     lufact!,
     lufact,
     norm,
-    normfro,
     null,
     peakflops,
     pinv,
@@ -656,6 +655,7 @@ export
     tril,
     triu!,
     triu,
+    vecnorm,

 # sparse
     etree,

2 changes: 1 addition & 1 deletion base/linalg.jl
@@ -85,7 +85,6 @@ export
     lufact,
     lufact!,
     norm,
-    normfro,
     null,
     peakflops,
     pinv,
@@ -114,6 +113,7 @@ export
     triu,
     tril!,
     triu!,
+    vecnorm,

 # Operators
     \,

2 changes: 2 additions & 0 deletions base/linalg/blas.jl
@@ -169,6 +169,7 @@ for (fname, elty, ret_type) in ((:dnrm2_,:Float64,:Float64),
     end
 end
 nrm2(x::StridedVector) = nrm2(length(x), x, stride(x,1))
+nrm2(x::Array) = nrm2(length(x), pointer(x), 1)

 ## asum
 for (fname, elty, ret_type) in ((:dasum_,:Float64,:Float64),
@@ -185,6 +186,7 @@ for (fname, elty, ret_type) in ((:dasum_,:Float64,:Float64),
     end
 end
 asum(x::StridedVector) = asum(length(x), x, stride(x,1))
+asum(x::Array) = asum(length(x), pointer(x), 1)

 ## axpy
 for (fname, elty) in ((:daxpy_,:Float64),

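The two added one-liners let whole dense arrays (not just vectors) reach the fast BLAS kernels, since an `Array` is contiguous with unit stride. A hypothetical check, assuming the `Base.LinAlg.BLAS` module path of this era and a `Float64` input:

    julia> A = [3.0 0.0; 0.0 4.0];

    julia> Base.LinAlg.BLAS.nrm2(A)   # Euclidean norm over all entries
    5.0

    julia> Base.LinAlg.BLAS.asum(A)   # sum of absolute values
    7.0
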
2 changes: 1 addition & 1 deletion base/linalg/cholmod.jl
@@ -386,7 +386,7 @@ function copy{Tv<:CHMVTypes}(B::CholmodDense{Tv})
                     (Ptr{c_CholmodDense{Tv}},Ptr{Uint8}), &B.c, cmn(Int32)))
 end

-function norm{Tv<:CHMVTypes}(D::CholmodDense{Tv},p::Number=1)
+function norm{Tv<:CHMVTypes}(D::CholmodDense{Tv},p::Real=1)
     ccall((:cholmod_norm_dense, :libcholmod), Float64,
           (Ptr{c_CholmodDense{Tv}}, Cint, Ptr{Uint8}),
           &D.c, p == 1 ? 1 :(p == Inf ? 1 : throw(ArgumentError("p must be 1 or Inf"))),cmn(Int32))

4 changes: 2 additions & 2 deletions base/linalg/dense.jl
@@ -18,11 +18,11 @@ function norm{T<:BlasFloat, TI<:Integer}(x::StridedVector{T}, rx::Union(Range1{T
     BLAS.nrm2(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx))
 end

-function norm{T<:BlasFloat}(x::StridedVector{T}, p::Number=2)
+function vecnorm{T<:BlasFloat}(x::Union(Array{T},StridedVector{T}), p::Real=2)
     length(x) == 0 && return zero(T)
     p == 1 && T <: Real && return BLAS.asum(x)
     p == 2 && return BLAS.nrm2(x)
-    invoke(norm, (AbstractVector, Number), x, p)
+    invoke(vecnorm, (Any, Real), x, p)
 end

 function triu!{T}(M::Matrix{T}, k::Integer)

126 changes: 83 additions & 43 deletions base/linalg/generic.jl
@@ -51,54 +51,97 @@ diag(A::AbstractVector) = error("use diagm instead of diag to construct a diagon

 #diagm{T}(v::AbstractVecOrMat{T})

-function normMinusInf(x::AbstractVector)
-    n = length(x)
-    n > 0 || return real(zero(eltype(x)))
-    a = abs(x[1])
-    @inbounds for i = 2:n
-        a = min(a, abs(x[i]))
+# special cases of vecnorm; note that they don't need to handle isempty(x)
+function vecnormMinusInf(x)
+    s = start(x)
+    (v, s) = next(x, s)
+    minabs = abs(v)
+    while !done(x, s)
+        (v, s) = next(x, s)
+        minabs = Base.scalarmin(minabs, abs(v))
     end
-    return a
+    return float(minabs)
 end
-function normInf(x::AbstractVector)
-    a = real(zero(eltype(x)))
-    @inbounds for i = 1:length(x)
-        a = max(a, abs(x[i]))
+function vecnormInf(x)
+    s = start(x)
+    (v, s) = next(x, s)
+    maxabs = abs(v)
+    while !done(x, s)
+        (v, s) = next(x, s)
+        maxabs = Base.scalarmax(maxabs, abs(v))
     end
-    return a
+    return float(maxabs)
 end
-function norm1(x::AbstractVector)
-    a = real(zero(eltype(x)))
-    @inbounds for i = 1:length(x)
-        a += abs(x[i])
+function vecnorm1(x)
+    s = start(x)
+    (v, s) = next(x, s)
+    av = float(abs(v))
+    T = typeof(av)
+    sum::promote_type(Float64, T) = av
+    while !done(x, s)
+        (v, s) = next(x, s)
+        sum += abs(v)
     end
-    return a
+    return convert(T, sum)
 end
-function norm2(x::AbstractVector)
-    nrmInfInv = inv(norm(x,Inf))
-    isinf(nrmInfInv) && return zero(nrmInfInv)
-    a = abs2(x[1]*nrmInfInv)
-    @inbounds for i = 2:length(x)
-        a += abs2(x[i]*nrmInfInv)
+function vecnorm2(x)
+    maxabs = vecnormInf(x)
+    maxabs == 0 && return maxabs
+    s = start(x)
+    (v, s) = next(x, s)
+    T = typeof(maxabs)
+    scale::promote_type(Float64, T) = 1/maxabs
+    y = abs(v)*scale
+    sum::promote_type(Float64, T) = y*y
+    while !done(x, s)
+        (v, s) = next(x, s)
+        y = abs(v)*scale
+        sum += y*y
     end
-    return sqrt(a)/nrmInfInv
+    return convert(T, maxabs * sqrt(sum))
 end
-function normp(x::AbstractVector,p::Number)
-    absx = convert(Vector{typeof(sqrt(abs(x[1])))}, abs(x))
-    dx = maximum(absx)
-    dx == 0 && return zero(typeof(absx))
-    scale!(absx, 1/dx)
-    pp = convert(eltype(absx), p)
-    return dx*sum(absx.^pp)^inv(pp)
+function vecnormp(x, p)
+    if p > 1 || p < 0 # need to rescale to avoid overflow/underflow
+        maxabs = vecnormInf(x)
+        maxabs == 0 && return maxabs
+        s = start(x)
+        (v, s) = next(x, s)
+        T = typeof(maxabs)
+        spp::promote_type(Float64, T) = p
+        scale::promote_type(Float64, T) = 1/maxabs
+        ssum::promote_type(Float64, T) = (abs(v)*scale)^spp
+        while !done(x, s)
+            (v, s) = next(x, s)
+            ssum += (abs(v)*scale)^spp
+        end
+        return convert(T, maxabs * ssum^inv(spp))
+    else # 0 < p < 1, no need for rescaling (but technically not a true norm)
+        s = start(x)
+        (v, s) = next(x, s)
+        av = float(abs(v))
+        T = typeof(av)
+        pp::promote_type(Float64, T) = p
+        sum::promote_type(Float64, T) = av^pp
+        while !done(x, s)
+            (v, s) = next(x, s)
+            sum += abs(v)^pp
+        end
+        return convert(T, sum^inv(pp))
+    end
 end
-function norm(x::AbstractVector, p::Number=2)
-    p == 0 && return countnz(x)
-    p == Inf && return normInf(x)
-    p == -Inf && return normMinusInf(x)
-    p == 1 && return norm1(x)
-    p == 2 && return norm2(x)
-    normp(x,p)
+function vecnorm(itr, p::Real=2)
+    isempty(itr) && return float(real(zero(eltype(itr))))
+    p == 2 && return vecnorm2(itr)
+    p == 1 && return vecnorm1(itr)
+    p == Inf && return vecnormInf(itr)
+    p == 0 && return convert(typeof(float(real(zero(eltype(itr))))),
+                             countnz(itr))
+    p == -Inf && return vecnormMinusInf(itr)
+    vecnormp(itr,p)
 end
+vecnorm(x::Number, p::Real=2) = p == 0 ? real(x==0 ? zero(x) : one(x)) : abs(x)
+
+norm(x::AbstractVector, p::Real=2) = vecnorm(x, p)

 function norm1{T}(A::AbstractMatrix{T})
     m,n = size(A)
@@ -133,9 +176,9 @@ function normInf{T}(A::AbstractMatrix{T})
     end
     return nrm
 end
-function norm{T}(A::AbstractMatrix{T}, p::Number=2)
-    p == 1 && return norm1(A)
+function norm{T}(A::AbstractMatrix{T}, p::Real=2)
     p == 2 && return norm2(A)
+    p == 1 && return norm1(A)
     p == Inf && return normInf(A)
     throw(ArgumentError("invalid p-norm p=$p. Valid: 1, 2, Inf"))
 end
@@ -146,9 +189,6 @@ function norm(x::Number, p=2)
     float(abs(x))
 end

-normfro(A::AbstractMatrix) = norm(reshape(A, length(A)))
-normfro(x::Number) = abs(x)
-
 rank(A::AbstractMatrix, tol::Real) = sum(svdvals(A) .> tol)
 function rank(A::AbstractMatrix)
     m,n = size(A)

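The point of dividing by `maxabs` in `vecnorm2` and `vecnormp` before squaring (or raising to the power `p`) is to avoid spurious overflow and underflow; a small sketch of the payoff (expected outputs, not taken from the commit):

    julia> v = [1e300, 1e300];

    julia> sqrt(sum([x*x for x in v]))   # naive sum of squares overflows
    Inf

    julia> vecnorm(v)                    # rescaled by the largest entry
    1.4142135623730951e300
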
4 changes: 2 additions & 2 deletions base/linalg/sparse.jl
@@ -457,9 +457,9 @@ end
 diff(a::SparseMatrixCSC, dim::Integer)= dim==1 ? sparse_diff1(a) : sparse_diff2(a)

 ## norm and rank
-normfro(A::SparseMatrixCSC) = norm(A.nzval,2)
+vecnorm(A::SparseMatrixCSC, p::Real=2) = vecnorm(A.nzval, p)

-function norm(A::SparseMatrixCSC,p::Number=1)
+function norm(A::SparseMatrixCSC,p::Real=1)
     m, n = size(A)
     if m == 0 || n == 0 || isempty(A)
         return real(zero(eltype(A)))

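Because the entrywise norm only needs the stored values, forwarding to `vecnorm(A.nzval, p)` never materializes the structural zeros. An illustrative check (expected behavior, not from the commit):

    julia> S = speye(3);

    julia> vecnorm(S)                       # sqrt(3), from the three stored ones
    1.7320508075688772

    julia> vecnorm(S) == vecnorm(full(S))
    true
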
8 changes: 2 additions & 6 deletions base/quadgk.jl
@@ -42,10 +42,6 @@ immutable Segment
 end
 isless(i::Segment, j::Segment) = isless(i.E, j.E)

-# use norm(A,1) for matrices since it is cheaper than norm(A) = norm(A,2),
-# but only assume that norm(x) exists for more general vector spaces
-cheapnorm(A::AbstractMatrix) = norm(A,1)
-cheapnorm(x) = norm(x)

 # Internal routine: approximately integrate f(x) over the interval (a,b)
 # by evaluating the integration rule (x,w,gw). Return a Segment.
@@ -156,14 +152,14 @@ end

 function quadgk{T<:FloatingPoint}(f, a::T,b::T,c::T...;
                                   abstol=zero(T), reltol=sqrt(eps(T)),
-                                  maxevals=10^7, order=7, norm=cheapnorm)
+                                  maxevals=10^7, order=7, norm=vecnorm)
     do_quadgk(f, [a, b, c...], order, T, abstol, reltol, maxevals, norm)
 end

 function quadgk{T<:FloatingPoint}(f, a::Complex{T},
                                   b::Complex{T},c::Complex{T}...;
                                   abstol=zero(T), reltol=sqrt(eps(T)),
-                                  maxevals=10^7, order=7, norm=cheapnorm)
+                                  maxevals=10^7, order=7, norm=vecnorm)
     do_quadgk(f, [a, b, c...], order, T, abstol, reltol, maxevals, norm)
 end

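With `vecnorm` as the default `norm` keyword, the old `cheapnorm` shim is unnecessary and array-valued integrands still get a sensible error estimate. A hedged sketch of the usage (output shown roughly as a 0.3-era REPL would print it):

    julia> val, err = quadgk(x -> [x, x^2], 0, 1);   # vector-valued integrand

    julia> val
    2-element Array{Float64,1}:
     0.5
     0.333333
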
9 changes: 9 additions & 0 deletions base/reduce.jl
@@ -128,6 +128,15 @@ end

 ## countnz & count

+
+function countnz(itr)
+    n = 0
+    for x in itr
+        n += (x != 0)
+    end
+    return n
+end
+
 function countnz{T}(a::AbstractArray{T})
     n = 0
     z = zero(T)

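The new fallback counts nonzeros by iterating, so the `p == 0` case of `vecnorm` works for any iterable, not just `AbstractArray`s. For instance (illustrative):

    julia> countnz([0, 1, 2, 0, 3])    # existing array method
    3

    julia> countnz((0, 1, 2, 0, 3))    # a tuple now goes through the generic method
    3
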
6 changes: 2 additions & 4 deletions doc/stdlib/base.rst
@@ -4350,7 +4350,7 @@ Although several external packages are available for numeric integration
 and solution of ordinary differential equations, we also provide
 some built-in integration support in Julia.

-.. function:: quadgk(f, a,b,c...; reltol=sqrt(eps), abstol=0, maxevals=10^7, order=7, norm=norm)
+.. function:: quadgk(f, a,b,c...; reltol=sqrt(eps), abstol=0, maxevals=10^7, order=7, norm=vecnorm)

    Numerically integrate the function ``f(x)`` from ``a`` to ``b``,
    and optionally over additional intervals ``b`` to ``c`` and so on.
@@ -4379,9 +4379,7 @@ some built-in integration support in Julia.
    type, or in fact any type supporting ``+``, ``-``, multiplication
    by real values, and a ``norm`` (i.e., any normed vector space).
    Alternatively, a different norm can be specified by passing a `norm`-like
-   function as the `norm` keyword argument (which defaults to `norm` for
-   most types, except for matrices where the default is the L1 norm since
-   it is cheaper to compute).
+   function as the `norm` keyword argument (which defaults to `vecnorm`).

    The algorithm is an adaptive Gauss-Kronrod integration technique:
    the integral in each interval is estimated using a Kronrod rule

11 changes: 8 additions & 3 deletions doc/stdlib/linalg.rst
@@ -274,11 +274,16 @@ Linear algebra functions in Julia are largely implemented by calling functions f

    For vectors, ``p`` can assume any numeric value (even though not all values produce a mathematically valid vector norm). In particular, ``norm(A, Inf)`` returns the largest value in ``abs(A)``, whereas ``norm(A, -Inf)`` returns the smallest.

-   For matrices, valid values of ``p`` are ``1``, ``2``, or ``Inf``. Use :func:`normfro` to compute the Frobenius norm.
+   For matrices, valid values of ``p`` are ``1``, ``2``, or ``Inf``. Use :func:`vecnorm` to compute the Frobenius norm.

-.. function:: normfro(A)
+.. function:: vecnorm(A, [p])

-   Compute the Frobenius norm of a matrix ``A``.
+   For any iterable container ``A`` (including arrays of any dimension)
+   of numbers, compute the ``p``-norm (defaulting to ``p=2``) as if ``A``
+   were a vector of the corresponding length.
+
+   For example, if ``A`` is a matrix and ``p=2``, then this is equivalent
+   to the Frobenius norm.

.. function:: cond(M, [p])

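To make the documented equivalence concrete (an illustrative session, not part of the docs): for a matrix and `p=2`, `vecnorm` is the square root of the sum of squared entries, i.e. the Frobenius norm:

    julia> A = [1 2; 3 4];

    julia> vecnorm(A)                    # Frobenius norm
    5.477225575051661

    julia> sqrt(sum([a^2 for a in A]))   # sqrt(1 + 4 + 9 + 16)
    5.477225575051661
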
4 changes: 2 additions & 2 deletions test/arpack.jl
@@ -59,9 +59,9 @@ Phi=CPM(Q)
 @test_approx_eq d[1] 1. # largest eigenvalue should be 1.
 v=reshape(v,(50,50)) # reshape to matrix
 v/=trace(v) # factor out arbitrary phase
-@test isapprox(normfro(imag(v)),0.) # it should be real
+@test isapprox(vecnorm(imag(v)),0.) # it should be real
 v=real(v)
-# @test isapprox(normfro(v-v')/2,0.) # it should be Hermitian
+# @test isapprox(vecnorm(v-v')/2,0.) # it should be Hermitian
 # Since this fails sometimes (numerical precision error),this test is commented out
 v=(v+v')/2
 @test isposdef(v)

6 changes: 3 additions & 3 deletions test/linalg1.jl
@@ -436,13 +436,13 @@ Ar = sprandn(10,10,.1)
 Ai = int(ceil(Ar*100))
 @test_approx_eq norm(Ac,1) norm(full(Ac),1)
 @test_approx_eq norm(Ac,Inf) norm(full(Ac),Inf)
-@test_approx_eq normfro(Ac) normfro(full(Ac))
+@test_approx_eq vecnorm(Ac) vecnorm(full(Ac))
 @test_approx_eq norm(Ar,1) norm(full(Ar),1)
 @test_approx_eq norm(Ar,Inf) norm(full(Ar),Inf)
-@test_approx_eq normfro(Ar) normfro(full(Ar))
+@test_approx_eq vecnorm(Ar) vecnorm(full(Ar))
 @test_approx_eq norm(Ai,1) norm(full(Ai),1)
 @test_approx_eq norm(Ai,Inf) norm(full(Ai),Inf)
-@test_approx_eq normfro(Ai) normfro(full(Ai))
+@test_approx_eq vecnorm(Ai) vecnorm(full(Ai))

 # scale real matrix by complex type
 @test_throws scale!([1.0], 2.0im)

7 changes: 6 additions & 1 deletion test/linalg2.jl
@@ -352,7 +352,7 @@ for relty in (Float16, Float32, Float64, BigFloat), elty in (relty, Complex{relt
         test_approx_eq_vecs(u1, u2)
         test_approx_eq_vecs(v1, v2)
     end
-    @test_approx_eq_eps 0 normfro(u2*diagm(d2)*v2'-Tfull) n*max(n^2*eps(relty), normfro(u1*diagm(d1)*v1'-Tfull))
+    @test_approx_eq_eps 0 vecnorm(u2*diagm(d2)*v2'-Tfull) n*max(n^2*eps(relty), vecnorm(u1*diagm(d1)*v1'-Tfull))
     end
 end
 end
@@ -642,6 +642,11 @@ for elty in (Float16, Float32, Float64, BigFloat, Complex{Float16}, Complex{Floa
     @test norm(As + Bs,1) <= norm(As,1) + norm(Bs,1)
     elty <: Union(BigFloat,Complex{BigFloat},BigInt) || @test norm(As + Bs) <= norm(As) + norm(Bs) # two is default
     @test norm(As + Bs,Inf) <= norm(As,Inf) + norm(Bs,Inf)
+
+    # vecnorm:
+    for p = -2:3
+        @test norm(reshape(A, length(A)), p) == vecnorm(A, p)
+    end
 end
 end

