Updates for Julia 0.6
ararslan committed Apr 15, 2017
1 parent 87fc62c · commit c3d00fd
Showing 19 changed files with 234 additions and 274 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -3,8 +3,8 @@ os:
- osx
- linux
julia:
- - 0.4
- 0.5
+ - 0.6
- nightly
notifications:
email: false
4 changes: 2 additions & 2 deletions REQUIRE
@@ -1,3 +1,3 @@
- julia 0.4
- Compat 0.8.4
+ julia 0.5
+ Compat 0.17.0
StatsBase 0.7.0
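
The Compat lower bound moves to 0.17.0, which, as far as I can tell, is the first release whose `@compat` macro understands Julia 0.6's new `abstract type` declaration syntax, the shim the source files below rely on. A minimal sketch of the pattern:

    using Compat

    # On Julia 0.6 this expands to `abstract type ICAGDeriv end`;
    # on 0.5 it lowers to the old `abstract ICAGDeriv` form.
    @compat abstract type ICAGDeriv end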
2 changes: 1 addition & 1 deletion src/MultivariateStats.jl
@@ -4,7 +4,7 @@ module MultivariateStats
using Compat
using StatsBase

- import Base: length, size, show, dump, sumabs2
+ import Base: length, size, show, dump
import Base.LinAlg: Cholesky
import StatsBase: fit, predict
using Compat: view
6 changes: 3 additions & 3 deletions src/cmds.jl
@@ -37,7 +37,7 @@ function dmat2gram!{GT}(G::AbstractMatrix{GT}, D::AbstractMatrix)
u = zeros(GT, n)
s = 0.0
for j = 1:n
- s += (u[j] = Base.sumabs2(view(D,:,j)) / n)
+ s += (u[j] = sum(abs2, view(D,:,j)) / n)
end
s /= n
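
`Base.sumabs2` is deprecated in Julia 0.6 in favor of the generic `sum(f, itr)` form used above, which applies `f` to each element before summing without allocating an intermediate array. A quick sketch:

    x = [3.0, 4.0]
    sum(abs2, x)    # 25.0, the same value as the deprecated Base.sumabs2(x)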

@@ -98,8 +98,8 @@ function classical_mds{T<:Real}(D::AbstractMatrix{T}, p::Int;

#Check if the last considered eigenvalue is degenerate
if m>0
- nevalsmore = sum(abs(E[:values][ord[m+1:end]] .- v[m]^2) .< n*eps())
- nevals = sum(abs(E[:values] .- v[m]^2) .< n*eps())
+ nevalsmore = sum(abs.(E[:values][ord[m+1:end]] .- v[m]^2) .< n*eps())
+ nevals = sum(abs.(E[:values] .- v[m]^2) .< n*eps())
if nevalsmore > 1
dowarn && warn("The last eigenpair is degenerate with $(nevals-1) others; $nevalsmore were ignored. Answer is not unique")
end
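
The added dots are Julia 0.6's broadcast syntax: `abs.(...)` applies `abs` elementwise, replacing the deprecated vectorized method `abs(::Array)`, and adjacent dotted operations fuse into a single loop. For example:

    w = [-1.0, 2.0, -3.0]
    abs.(w)                      # [1.0, 2.0, 3.0]
    sum(abs.(w .- 2.0) .< 1.5)   # 1; fused elementwise ops, then a count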
16 changes: 8 additions & 8 deletions src/ica.jl
@@ -24,7 +24,7 @@ transform(M::ICA, x::AbstractVecOrMat) = At_mul_B(M.W, centralize(x, M.mean))
#
# It returns a function value v, and derivative d
#
- abstract ICAGDeriv
+ @compat abstract type ICAGDeriv end

immutable Tanh <: ICAGDeriv
a::Float64
@@ -76,15 +76,15 @@ function fastica!(W::DenseMatrix{Float64}, # initialized component matrix,
end

# pre-allocated storage
- Wp = Array(Float64, m, k) # to store previous version of W
- U = Array(Float64, n, k) # to store w'x & g(w'x)
- Y = Array(Float64, m, k) # to store E{x g(w'x)} for components
- E1 = Array(Float64, k) # store E{g'(w'x)} for components
+ Wp = Matrix{Float64}(m, k) # to store previous version of W
+ U = Matrix{Float64}(n, k) # to store w'x & g(w'x)
+ Y = Matrix{Float64}(m, k) # to store E{x g(w'x)} for components
+ E1 = Vector{Float64}(k) # store E{g'(w'x)} for components

# normalize each column
for j = 1:k
w = view(W,:,j)
- scale!(w, 1.0 / sqrt(sumabs2(w)))
+ scale!(w, 1.0 / sqrt(sum(abs2, w)))
end

# main loop
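
The `Array(T, dims...)` constructor is deprecated in 0.6; the replacement spells the element type as a type parameter and returns an uninitialized array. Roughly:

    m, k = 8, 3                  # hypothetical sizes
    Wp = Matrix{Float64}(m, k)   # uninitialized 8×3 matrix (0.6 syntax; later
                                 # Julia requires Matrix{Float64}(undef, m, k))
    E1 = Vector{Float64}(k)      # uninitialized length-3 vector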
@@ -178,14 +178,14 @@ function fit(::Type{ICA}, X::DenseMatrix{Float64}, # sample matrix, siz
Efac = eigfact(C)
ord = sortperm(Efac.values; rev=true)
(v, P) = extract_kv(Efac, ord, k)
- W0 = scale!(P, 1.0 ./ sqrt(v))
+ W0 = scale!(P, 1.0 ./ sqrt.(v))
# println(W0' * C * W0)
Z = W0'Z
end

# initialize
W = (isempty(winit) ? randn(size(Z,1), k) : copy(winit))::Matrix{Float64}

# invoke core algorithm
fastica!(W, Z, fun, maxiter, tol, verbose)
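
For context, the whitening branch above takes the top-k eigenpairs of the covariance C and rescales the eigenvector columns by 1 ./ sqrt.(v), so that W0' * C * W0 ≈ I and the rotated data W0'Z has identity covariance. A condensed sketch, assuming C is the sample covariance of the centered data and k the requested number of components, with plain indexing standing in for the package's extract_kv helper:

    Efac = eigfact(C)                        # 0.6 API; `eigen` in later Julia
    ord  = sortperm(Efac.values; rev=true)[1:k]
    v, P = Efac.values[ord], Efac.vectors[:, ord]
    W0   = scale!(P, 1.0 ./ sqrt.(v))        # scales the columns of P in place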

10 changes: 5 additions & 5 deletions src/lda.jl
@@ -2,7 +2,7 @@

#### Type to represent a linear discriminant functional

- abstract Discriminant{T}
+ @compat abstract type Discriminant{T} end

immutable LinearDiscriminant{T<:AbstractFloat} <: Discriminant{T}
w::Vector{T}
@@ -105,7 +105,7 @@ function multiclass_lda_stats{T<:AbstractFloat}(nc::Int, X::DenseMatrix{T}, y::A

# compute between-class scattering
mean = cmeans * (cweights ./ n)
- U = scale!(cmeans .- mean, sqrt(cweights))
+ U = scale!(cmeans .- mean, sqrt.(cweights))
Sb = A_mul_Bt(U, U)

return MulticlassLDAStats(Vector{T}(cweights), mean, cmeans, Sw, Sb)
@@ -229,7 +229,7 @@ function fit{T,F<:SubspaceLDA}(::Type{F}, X::AbstractMatrix{T}, label::AbstractV
# Note Sb = Hb*Hb', Sw = Hw*Hw'
cmeans, cweights, Hw = center(X, label, nc)
dmeans = cmeans .- (normalize ? mean(cmeans, 2) : cmeans * (cweights / n))
- Hb = normalize ? dmeans : dmeans * Diagonal(sqrt(cweights))
+ Hb = normalize ? dmeans : dmeans * Diagonal(sqrt.(cweights))
if normalize
Hw /= sqrt(n)
end
@@ -260,7 +260,7 @@ function lda_gsvd{T}(Hb::AbstractMatrix{T}, Hw::AbstractMatrix{T}, cweights::Abs
# Normalize
Gw = G' * Hw
nrm = Gw * Gw'
- G = G ./ reshape(sqrt(diag(nrm)), 1, ncnz-1)
+ G = G ./ reshape(sqrt.(diag(nrm)), 1, ncnz-1)
# Also get the eigenvalues
Gw = G' * Hw
Gb = G' * Hb
@@ -288,7 +288,7 @@ function center{T}(X::AbstractMatrix{T}, label::AbstractVector{Int}, nc=maximum(
end
end
# Compute differences from the means
- dX = Array(T, d, n)
+ dX = Matrix{T}(d, n)
for j = 1:n
k = label[j]
for i = 1:d
6 changes: 1 addition & 5 deletions src/pca.jl
@@ -148,11 +148,7 @@ function fit{T<:AbstractFloat}(::Type{PCA}, X::DenseMatrix{T};

# delegate to core
if method == :cov
- if VERSION < v"0.5.0-dev+660"
- C = cov(X; vardim=2, mean=isempty(mv) ? 0 : mv)::Matrix{T}
- else
- C = Base.covm(X, isempty(mv) ? 0 : mv, 2)
- end
+ C = Base.covm(X, isempty(mv) ? 0 : mv, 2)
M = pcacov(C, mv; maxoutdim=maxoutdim, pratio=pratio)
elseif method == :svd
Z = centralize(X, mv)
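
With 0.4 support dropped, the version gate collapses to the 0.5/0.6 branch. Note that `Base.covm` is an internal, unexported helper that computes a covariance matrix about a precomputed mean; the trailing 2 treats columns as observations. A usage sketch on made-up data:

    X  = randn(5, 200)          # 5 variables, 200 column observations
    mv = vec(mean(X, 2))
    C  = Base.covm(X, mv, 2)    # 5×5 covariance about the given mean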
6 changes: 1 addition & 5 deletions src/ppca.jl
@@ -180,11 +180,7 @@ function fit{T<:AbstractFloat}(::Type{PPCA}, X::DenseMatrix{T};
Z = centralize(X, mv)
M = ppcaml(Z, mv, maxoutdim=maxoutdim, tol=tol)
elseif method == :em || method == :bayes
- S = if VERSION < v"0.5.0-dev+660"
- cov(X; vardim=2, mean=isempty(mv) ? 0 : mv)::Matrix{T}
- else
- Base.covm(X, isempty(mv) ? 0 : mv, 2)
- end
+ S = Base.covm(X, isempty(mv) ? 0 : mv, 2)
if method == :em
M = ppcaem(S, mv, n, maxoutdim=maxoutdim, tol=tol, tot=tot)
elseif method == :bayes
12 changes: 4 additions & 8 deletions src/whiten.jl
@@ -6,11 +6,7 @@
#
function cov_whitening{T<:AbstractFloat}(C::Cholesky{T})
cf = C[:UL]
- if VERSION < v"0.4.0-"
- full(istriu(cf) ? inv(Triangular(C.UL, :U)) : inv(Triangular(C.UL, :L)'))::Matrix{T}
- else
- full(inv(istriu(cf) ? cf : cf'))::Matrix{T}
- end
+ full(inv(istriu(cf) ? cf : cf'))::Matrix{T}
end

cov_whitening!{T<:AbstractFloat}(C::DenseMatrix{T}) = cov_whitening(cholfact!(C, :U))
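
As background for the simplification above: `cov_whitening` inverts the triangular Cholesky factor of the covariance, so for C = U'U it returns W = inv(U) with W' * C * W ≈ I. A quick check on a toy covariance:

    C = [4.0 2.0; 2.0 3.0]
    W = cov_whitening(cholfact(C))
    W' * C * W                  # ≈ 2×2 identity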
@@ -28,15 +24,15 @@ cov_whitening{T<:AbstractFloat}(C::DenseMatrix{T}, regcoef::Real) =
immutable Whitening{T<:AbstractFloat}
mean::Vector{T}
W::Matrix{T}
- function Whitening(mean::Vector{T}, W::Matrix{T})
+ function (::Type{Whitening{T}}){T}(mean::Vector{T}, W::Matrix{T})
d, d2 = size(W)
d == d2 || error("W must be a square matrix.")
isempty(mean) || length(mean) == d ||
throw(DimensionMismatch("Sizes of mean and W are inconsistent."))
- return new(mean, W)
+ return new{T}(mean, W)
end
end
- Whitening{T<:AbstractFloat}(mean::Vector{T}, W::Matrix{T}) = Whitening{T}(mean, W)
+ (::Type{Whitening}){T<:AbstractFloat}(mean::Vector{T}, W::Matrix{T}) = Whitening{T}(mean, W)

indim(f::Whitening) = size(f.W, 1)
outdim(f::Whitening) = size(f.W, 2)
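
This rewrite is the spelling of parametric inner constructors that works on both 0.5 and 0.6: Julia 0.6 wants the type parameter explicit in `new{T}`, and the `(::Type{...}){T}(...)` call-overload form declares `T` in a way both versions accept. The same pattern as above, on a toy type, for illustration only:

    immutable Scaled{T<:AbstractFloat}
        x::Vector{T}
        function (::Type{Scaled{T}}){T}(x::Vector{T})
            isempty(x) && error("x must be nonempty")
            return new{T}(x)      # explicit parameter, as required on 0.6
        end
    end
    (::Type{Scaled}){T<:AbstractFloat}(x::Vector{T}) = Scaled{T}(x)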
56 changes: 25 additions & 31 deletions test/cca.jl
@@ -27,8 +27,8 @@ M = CCA(Float64[], Float64[], Px, Py, [0.8, 0.6, 0.4])
@test yprojection(M) == Py
@test correlations(M) == [0.8, 0.6, 0.4]

- @test_approx_eq xtransform(M, X) Px'X
- @test_approx_eq ytransform(M, Y) Py'Y
+ @test xtransform(M, X) ≈ Px'X
+ @test ytransform(M, Y) ≈ Py'Y
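
`@test_approx_eq` is deprecated on 0.6; `@test a ≈ b` checks `isapprox(a, b)`, whose default relative tolerance is the square root of the element type's `eps`. Roughly:

    using Base.Test                  # the 0.6 name of the Test module

    @test 1.0 + 1e-12 ≈ 1.0                       # within the default rtol
    @test isapprox(1.0, 1.0 + 1e-6; atol = 1e-5)  # explicit-tolerance form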

## CCA with nonzero means

@@ -45,8 +45,8 @@ M = CCA(ux, uy, Px, Py, [0.8, 0.6, 0.4])
@test yprojection(M) == Py
@test correlations(M) == [0.8, 0.6, 0.4]

- @test_approx_eq xtransform(M, X) Px' * (X .- ux)
- @test_approx_eq ytransform(M, Y) Py' * (Y .- uy)
+ @test xtransform(M, X) ≈ Px' * (X .- ux)
+ @test ytransform(M, Y) ≈ Py' * (Y .- uy)


## prepare data
@@ -62,15 +62,9 @@ ym = vec(mean(Y, 2))
Zx = X .- xm
Zy = Y .- ym

- if VERSION < v"0.5.0-dev+660"
- Cxx = cov(X; vardim=2)
- Cyy = cov(Y; vardim=2)
- Cxy = cov(X, Y; vardim=2)
- else
- Cxx = cov(X, 2)
- Cyy = cov(Y, 2)
- Cxy = cov(X, Y, 2)
- end
+ Cxx = cov(X, 2)
+ Cyy = cov(Y, 2)
+ Cxy = cov(X, Y, 2)
Cyx = Cxy'
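
The keyword form `cov(X; vardim=2)` accepted on 0.4 was removed in 0.5; since then the observation dimension is passed positionally, as above. For instance:

    A, B = randn(3, 100), randn(3, 100)   # columns are observations
    Caa  = cov(A, 2)                      # 3×3 covariance
    Cab  = cov(A, B, 2)                   # 3×3 cross-covariance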

## ccacov
@@ -87,12 +81,12 @@ rho = correlations(M)
@test ymean(M) == ym
@test issorted(rho; rev=true)

- @test_approx_eq Px' * Cxx * Px eye(p)
- @test_approx_eq Py' * Cyy * Py eye(p)
- @test_approx_eq Cxy * (Cyy \ Cyx) * Px Cxx * Px * Diagonal(rho.^2)
- @test_approx_eq Cyx * (Cxx \ Cxy) * Py Cyy * Py * Diagonal(rho.^2)
- @test_approx_eq Px qnormalize!(Cxx \ (Cxy * Py), Cxx)
- @test_approx_eq Py qnormalize!(Cyy \ (Cyx * Px), Cyy)
+ @test Px' * Cxx * Px ≈ eye(p)
+ @test Py' * Cyy * Py ≈ eye(p)
+ @test Cxy * (Cyy \ Cyx) * Px ≈ Cxx * Px * Diagonal(rho.^2)
+ @test Cyx * (Cxx \ Cxy) * Py ≈ Cyy * Py * Diagonal(rho.^2)
+ @test Px ≈ qnormalize!(Cxx \ (Cxy * Py), Cxx)
+ @test Py ≈ qnormalize!(Cyy \ (Cyx * Px), Cyy)

# Y ~ X
M = fit(CCA, Y, X; method=:cov, outdim=p)
@@ -106,12 +100,12 @@ rho = correlations(M)
@test ymean(M) == xm
@test issorted(rho; rev=true)

- @test_approx_eq Px' * Cxx * Px eye(p)
- @test_approx_eq Py' * Cyy * Py eye(p)
- @test_approx_eq Cxy * (Cyy \ Cyx) * Px Cxx * Px * Diagonal(rho.^2)
- @test_approx_eq Cyx * (Cxx \ Cxy) * Py Cyy * Py * Diagonal(rho.^2)
- @test_approx_eq Px qnormalize!(Cxx \ (Cxy * Py), Cxx)
- @test_approx_eq Py qnormalize!(Cyy \ (Cyx * Px), Cyy)
+ @test Px' * Cxx * Px ≈ eye(p)
+ @test Py' * Cyy * Py ≈ eye(p)
+ @test Cxy * (Cyy \ Cyx) * Px ≈ Cxx * Px * Diagonal(rho.^2)
+ @test Cyx * (Cxx \ Cxy) * Py ≈ Cyy * Py * Diagonal(rho.^2)
+ @test Px ≈ qnormalize!(Cxx \ (Cxy * Py), Cxx)
+ @test Py ≈ qnormalize!(Cyy \ (Cyx * Px), Cyy)


## ccasvd
@@ -128,9 +122,9 @@ rho = correlations(M)
@test ymean(M) == ym
@test issorted(rho; rev=true)

- @test_approx_eq Px' * Cxx * Px eye(p)
- @test_approx_eq Py' * Cyy * Py eye(p)
- @test_approx_eq Cxy * (Cyy \ Cyx) * Px Cxx * Px * Diagonal(rho.^2)
- @test_approx_eq Cyx * (Cxx \ Cxy) * Py Cyy * Py * Diagonal(rho.^2)
- @test_approx_eq Px qnormalize!(Cxx \ (Cxy * Py), Cxx)
- @test_approx_eq Py qnormalize!(Cyy \ (Cyx * Px), Cyy)
+ @test Px' * Cxx * Px ≈ eye(p)
+ @test Py' * Cyy * Py ≈ eye(p)
+ @test Cxy * (Cyy \ Cyx) * Px ≈ Cxx * Px * Diagonal(rho.^2)
+ @test Cyx * (Cxx \ Cxy) * Py ≈ Cyy * Py * Diagonal(rho.^2)
+ @test Px ≈ qnormalize!(Cxx \ (Cxy * Py), Cxx)
+ @test Py ≈ qnormalize!(Cyy \ (Cyx * Px), Cyy)
21 changes: 8 additions & 13 deletions test/cmds.jl
@@ -5,10 +5,10 @@ using Compat
## testing data

function pwdists(X)
- S = Base.sumabs2(X, 1)
- D2 = S .+ S' - 2 * (X'X)
- D2[diagind(D2)] = 0.0
- return sqrt(D2)
+ S = sum(abs2, X, 1)
+ D2 = S .+ S' - 2 * (X'X)
+ D2[diagind(D2)] = 0.0
+ return sqrt.(D2)
end
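
The function builds all pairwise Euclidean distances from the Gram identity ‖xᵢ − xⱼ‖² = ‖xᵢ‖² + ‖xⱼ‖² − 2⟨xᵢ, xⱼ⟩: `sum(abs2, X, 1)` keeps S as a 1×n row, so `S .+ S'` broadcasts to a full n×n matrix, and the diagonal is zeroed to absorb rounding noise before the elementwise `sqrt.`. A sanity check:

    A = randn(3, 5)
    D = pwdists(A)
    @test issymmetric(D) && all(diag(D) .== 0)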

X0 = randn(3, 5)
@@ -22,16 +22,16 @@ D0 = pwdists(X0)

D = gram2dmat(G0)
@test issymmetric(D)
- @test_approx_eq D D0
+ @test D ≈ D0

G = dmat2gram(D0)
@test issymmetric(G)
- @test_approx_eq gram2dmat(G) D0
+ @test gram2dmat(G) ≈ D0

## classical MDS

X = classical_mds(D0, 3)
- @test_approx_eq pwdists(X) D0
+ @test pwdists(X) ≈ D0

#Test MDS embeddings in dimensions >= number of points
@test classical_mds([0 1; 1 0], 2, dowarn=false) == [-0.5 0.5; 0 0]
@@ -52,12 +52,7 @@ let
0.38095238095238093 0.4 0.21052631578947367 0.5454545454545454 0.3333333333333333 0.3181818181818182 0.23529411764705882 0.5555555555555556 0.4 1.0]
X = [-0.27529104101488666 0.006134513718202863 0.33298809606740326 0.2608994458893664 -0.46185275796909575 -0.23734315039370618 0.29972782027671513 0.03827901455843394 -0.04096713097883363 0.07742518984640051
-0.08177061420820278 -0.0044504235228030225 -0.3271919093638943 0.28206254638779243 -0.0954706915166714 -0.07137742126520012 -0.30754933764853587 0.18582658369448027 -0.03715307349750036 0.45707434094053534]
- if VERSION < v"0.4.0"
- #Don't have correct eigenvector testing; run a cruder test
- @test_approx_eq abs(classical_mds(D, 2)) abs(X)
- else
- Test.test_approx_eq_modphase(classical_mds(D, 2)', X')
- end
+ Test.test_approx_eq_modphase(classical_mds(D, 2)', X')
end
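
A note on why the version-gated fallback could go entirely: an MDS embedding is determined only up to a sign flip of each coordinate axis (and rotation within degenerate eigenspaces), so its columns cannot be compared with a plain ≈. `Test.test_approx_eq_modphase` compares columns up to a unit-modulus factor, e.g.:

    A = [1.0 2.0; 3.0 4.0]
    B = copy(A); B[:, 2] = -B[:, 2]      # flip one column's sign
    Test.test_approx_eq_modphase(A, B)   # passes: columns agree modulo phase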

#10 - test degenerate problem
