Prepare for 0.5. No functors. Deprecated symbols. New getfield syntax.
Fix some ambiguities.
andreasnoack committed May 19, 2016
1 parent b67be40 commit ab23126
Showing 14 changed files with 38 additions and 30 deletions.
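For orientation, here is a minimal sketch (not part of the diff) of the Julia 0.4 spellings this commit retires and the 0.5 replacements it switches to; it assumes the Compat package shims the new names on 0.4, which is why REQUIRE gains a Compat entry below.

using Compat                 # assumed to provide the 0.5 names on Julia 0.4

s = Symbol("log", "pdf")     # was: symbol(string("log", "pdf"))
S = supertype(Int)           # was: super(Int)
r = getfield(1 + 2im, :re)   # was: (1 + 2im).(:re) -- x.(sym) clashes with 0.5 broadcast syntax
p = ones(10) * 1.0e-6        # was: scale(ones(10), 1.0e-6) -- scale is deprecated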
1 change: 1 addition & 0 deletions REQUIRE
@@ -2,3 +2,4 @@ julia 0.4
PDMats 0.4.1 0.5
StatsFuns 0.1.1
StatsBase 0.7.0
Compat 0.2.17
1 change: 1 addition & 0 deletions src/Distributions.jl
@@ -5,6 +5,7 @@ module Distributions
using PDMats
using StatsFuns
using StatsBase
using Compat

import Base.Random
import Base: size, eltype, length, full, convert, show, getindex, scale, scale!, rand, rand!
4 changes: 2 additions & 2 deletions src/common.jl
@@ -54,10 +54,10 @@ typealias DiscreteMatrixDistribution Distribution{Matrixvariate, Discrete}
typealias ContinuousMatrixDistribution Distribution{Matrixvariate, Continuous}

variate_form{VF<:VariateForm,VS<:ValueSupport}(::Type{Distribution{VF,VS}}) = VF
variate_form{T<:Distribution}(::Type{T}) = variate_form(super(T))
variate_form{T<:Distribution}(::Type{T}) = variate_form(supertype(T))

value_support{VF<:VariateForm,VS<:ValueSupport}(::Type{Distribution{VF,VS}}) = VS
value_support{T<:Distribution}(::Type{T}) = value_support(super(T))
value_support{T<:Distribution}(::Type{T}) = value_support(supertype(T))

## TODO: the following types need to be improved
abstract SufficientStats
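As a quick illustration of the recursion above (a sketch, assuming stock Distributions types), the new supertype calls walk the type tree until the Distribution{VF,VS} method matches:

variate_form(Normal)    # Normal <: ... <: Distribution{Univariate,Continuous}  => Univariate
value_support(Poisson)  # Poisson <: ... <: Distribution{Univariate,Discrete}   => Discrete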
1 change: 0 additions & 1 deletion src/multivariate/mvnormal.jl
@@ -148,7 +148,6 @@ immutable MvNormalKnownCov{Cov<:AbstractPDMat}
Σ::Cov
end

MvNormalKnownCov{Cov<:AbstractPDMat}(C::Cov) = MvNormalKnownCov{Cov}(C)
MvNormalKnownCov(d::Int, σ::Real) = MvNormalKnownCov(ScalMat(d, abs2(Float64(σ))))
MvNormalKnownCov(σ::Vector{Float64}) = MvNormalKnownCov(PDiagMat(abs2(σ)))
MvNormalKnownCov(Σ::Matrix{Float64}) = MvNormalKnownCov(PDMat(Σ))
8 changes: 4 additions & 4 deletions src/multivariate/mvnormalcanon.jl
@@ -34,9 +34,9 @@ function MvNormalCanon{P<:AbstractPDMat, T<:Real}(h::Vector{T}, J::P)
MvNormalCanon{P,Vector{T}}(J \ h, h, J)
end

MvNormalCanon(h::Vector, J::Matrix) = MvNormalCanon(h, PDMat(J))
MvNormalCanon(h::Vector, prec::Vector) = MvNormalCanon(h, PDiagMat(prec))
MvNormalCanon(h::Vector, prec) = MvNormalCanon(h, ScalMat(length(h), prec))
MvNormalCanon{T<:Real}(h::Vector{T}, J::Matrix) = MvNormalCanon(h, PDMat(J))
MvNormalCanon{T<:Real}(h::Vector{T}, prec::Vector) = MvNormalCanon(h, PDiagMat(prec))
MvNormalCanon{T<:Real}(h::Vector{T}, prec) = MvNormalCanon(h, ScalMat(length(h), prec))

MvNormalCanon(J::Matrix) = MvNormalCanon(PDMat(J))
MvNormalCanon(prec::Vector) = MvNormalCanon(PDiagMat(prec))
@@ -85,4 +85,4 @@ unwhiten_winv!(J::AbstractPDMat, x::AbstractVecOrMat) = unwhiten!(inv(J), x)
unwhiten_winv!(J::PDiagMat, x::AbstractVecOrMat) = whiten!(J, x)
unwhiten_winv!(J::ScalMat, x::AbstractVecOrMat) = whiten!(J, x)

_rand!(d::MvNormalCanon, x::AbstractVecOrMat) = add!(unwhiten_winv!(d.J, randn!(x)), d.μ)

@getzdan commented on May 31, 2017

Why was AbstractMatrix preferred to AbstractVecOrMat? This somehow seems to have stopped a test from another package.

@ararslan (Member) commented on May 31, 2017

See #534 and #535

@mschauer (Member) commented on Jun 28, 2017

As in #625 this can be resolved by having `_rand!(d::MvNormalCanon, x::AbstractVector)` and `_rand!(d::MvNormalCanon, x::AbstractMatrix)`.

_rand!(d::MvNormalCanon, x::AbstractMatrix) = add!(unwhiten_winv!(d.J, randn!(x)), d.μ)
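For reference, a sketch of the split @mschauer suggests above (not what this commit does; it keeps a single AbstractMatrix method): giving the vector and matrix cases their own signatures leaves no AbstractVecOrMat method to collide with other definitions.

_rand!(d::MvNormalCanon, x::AbstractVector) = add!(unwhiten_winv!(d.J, randn!(x)), d.μ)
_rand!(d::MvNormalCanon, x::AbstractMatrix) = add!(unwhiten_winv!(d.J, randn!(x)), d.μ)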
2 changes: 1 addition & 1 deletion src/multivariate/mvtdist.jl
@@ -133,7 +133,7 @@ function _logpdf!{T<:Real}(r::AbstractArray, d::AbstractMvTDist, x::AbstractMatrix{T})
return r
end

_pdf!{T<:Real}(r::AbstractArray, d::AbstractMvNormal, x::AbstractMatrix{T}) = exp!(_logpdf!(r, d, x))
_pdf!{T<:Real}(r::AbstractArray, d::AbstractMvTDist, x::AbstractMatrix{T}) = exp!(_logpdf!(r, d, x))

function gradlogpdf{T<:Real}(d::GenericMvTDist, x::AbstractVector{T})
z::Vector{T} = d.zeromean ? x : x - d.μ
5 changes: 4 additions & 1 deletion src/multivariate/vonmisesfisher.jl
@@ -28,7 +28,10 @@ end

VonMisesFisher{T<:Real}(μ::Vector{T}, κ::Real) = VonMisesFisher(Float64(μ), Float64(κ))

VonMisesFisher(θ::Vector{Float64}) = (κ = vecnorm(θ); VonMisesFisher(scale(θ, 1.0 / κ), κ))
function VonMisesFisher(θ::Vector{Float64})
    κ = vecnorm(θ)
    return VonMisesFisher(θ * (1 / κ), κ)
end
VonMisesFisher{T<:Real}(θ::Vector{T}) = VonMisesFisher(Float64(θ))

show(io::IO, d::VonMisesFisher) = show(io, d, (:μ, :κ))
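A hypothetical call to the rewritten constructor above: the argument's norm becomes the concentration κ and its direction the mean μ.

d = VonMisesFisher([2.0, 0.0, 0.0])   # equivalent to VonMisesFisher([1.0, 0.0, 0.0], 2.0)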
2 changes: 1 addition & 1 deletion src/show.jl
@@ -32,7 +32,7 @@ function _use_multline_show(d::Distribution, pnames)
multline = false
tlen = 0
for (i, p) in enumerate(pnames)
pv = d.(p)
pv = getfield(d, p)
if !(isa(pv, Number) || isa(pv, NTuple) || isa(pv, AbstractVector))
multline = true
else
24 changes: 12 additions & 12 deletions src/univariates.jl
@@ -185,8 +185,8 @@ for fun in [:pdf, :logpdf,
:invlogcdf, :invlogccdf,
:quantile, :cquantile]

_fun! = symbol(string('_', fun, '!'))
fun! = symbol(string(fun, '!'))
_fun! = Symbol('_', fun, '!')
fun! = Symbol(fun, '!')

@eval begin
function ($_fun!)(r::AbstractArray, d::UnivariateDistribution, X::AbstractArray)
Expand Down Expand Up @@ -302,16 +302,16 @@ macro _delegate_statsfuns(D, fpre, psyms...)
T = dt <: DiscreteUnivariateDistribution ? :Int : :Real

# function names from StatsFuns
fpdf = symbol(string(fpre, "pdf"))
flogpdf = symbol(string(fpre, "logpdf"))
fcdf = symbol(string(fpre, "cdf"))
fccdf = symbol(string(fpre, "ccdf"))
flogcdf = symbol(string(fpre, "logcdf"))
flogccdf = symbol(string(fpre, "logccdf"))
finvcdf = symbol(string(fpre, "invcdf"))
finvccdf = symbol(string(fpre, "invccdf"))
finvlogcdf = symbol(string(fpre, "invlogcdf"))
finvlogccdf = symbol(string(fpre, "invlogccdf"))
fpdf = Symbol(fpre, "pdf")
flogpdf = Symbol(fpre, "logpdf")
fcdf = Symbol(fpre, "cdf")
fccdf = Symbol(fpre, "ccdf")
flogcdf = Symbol(fpre, "logcdf")
flogccdf = Symbol(fpre, "logccdf")
finvcdf = Symbol(fpre, "invcdf")
finvccdf = Symbol(fpre, "invccdf")
finvlogcdf = Symbol(fpre, "invlogcdf")
finvlogccdf = Symbol(fpre, "invlogccdf")

# parameter fields
pargs = [Expr(:(.), :d, Expr(:quote, s)) for s in psyms]
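For concreteness, a sketch of what the renamed pieces produce, assuming a hypothetical fpre = "norm" and parameter fields :μ and :σ (neither appears in the diff):

fpdf  = Symbol("norm", "pdf")                                 # => :normpdf (was symbol(string(fpre, "pdf")))
pargs = [Expr(:(.), :d, Expr(:quote, s)) for s in (:μ, :σ)]   # expressions standing for d.μ and d.σ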
2 changes: 1 addition & 1 deletion test/categorical.jl
@@ -45,5 +45,5 @@ d = Categorical(4)
@test maximum(d) == 4
@test probs(d) == [0.25, 0.25, 0.25, 0.25]

p = scale(ones(10^6), 1.0e-6)
p = ones(10^6) * 1.0e-6
@test Distributions.isprobvec(p)
2 changes: 1 addition & 1 deletion test/mixture.jl
@@ -118,7 +118,7 @@ function test_mixture(g::MultivariateMixture, n::Int, ns::Int)
@test isa(Xs, Matrix{Float64})
@test size(Xs) == (length(g), ns)
@test_approx_eq_eps vec(mean(Xs, 2)) mean(g) 0.01
@test_approx_eq_eps cov(Xs, vardim=2) cov(g) 0.01
@test_approx_eq_eps cov(Xs, 2) cov(g) 0.01
end
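The cov change above trades the removed vardim keyword for the positional dimension argument; a small sketch with hypothetical data:

Xs = randn(3, 1000)    # three-dimensional samples stored as columns
size(cov(Xs, 2))       # => (3, 3): covariance across columns, as vardim=2 used to give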

function test_params(g::AbstractMixtureModel)
2 changes: 1 addition & 1 deletion test/mvnormal.jl
@@ -116,7 +116,7 @@ function _gauss_mle(x::Matrix{Float64}, w::Vector{Float64})
sw = sum(w)
mu = (x * w) * (1/sw)
z = x .- mu
C = (z * scale(w, z')) * (1/sw)
C = (z * (Diagonal(w) * z')) * (1/sw)
Base.LinAlg.copytri!(C, 'U')
return mu, C
end
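A tiny check of the Diagonal replacement above, with hypothetical values: Diagonal(w) * z' scales row i of z' by w[i], which is exactly what the deprecated scale(w, z') computed.

w = [0.5, 2.0]
z = [1.0 3.0; 2.0 4.0]
Diagonal(w) * z'       # == [0.5 1.0; 6.0 8.0] -- rows of z' scaled by w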
10 changes: 7 additions & 3 deletions test/truncate.jl
@@ -1,5 +1,7 @@
# Testing discrete univariate distributions

module TestTruncate

using Distributions
import JSON
using Base.Test
@@ -9,10 +11,10 @@ using Compat
function verify_and_test_drive(jsonfile, selected, n_tsamples::Int,lower::Int,upper::Int)
R = JSON.parsefile(jsonfile)
for (ex, dct) in R
dsym = symbol(dct["dtype"])
dsym = Symbol(dct["dtype"])
dname = string(dsym)

dsymt = symbol("Truncated($(dct["dtype"]),$lower,$upper")
dsymt = Symbol("Truncated($(dct["dtype"]),$lower,$upper")
dnamet = string(dsym)

# test whether it is included in the selected list
@@ -52,7 +54,7 @@ function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int)
# verify parameters
pdct = dct["params"]
for (fname, val) in pdct
f = eval(symbol(fname))
f = eval(Symbol(fname))
@assert isa(f, Function)
Base.Test.test_approx_eq(f(d.untruncated), val, "$fname(d.untruncated)", "val")
end
@@ -107,3 +109,5 @@ for c in ["discrete",
verify_and_test_drive(jsonfile, ARGS, 10^6,3,5)
println()
end

end
4 changes: 2 additions & 2 deletions test/univariates.jl
@@ -9,7 +9,7 @@ using Compat
function verify_and_test_drive(jsonfile, selected, n_tsamples::Int)
R = JSON.parsefile(jsonfile)
for (ex, dct) in R
dsym = symbol(dct["dtype"])
dsym = Symbol(dct["dtype"])
dname = string(dsym)

# test whether it is included in the selected list
Expand Down Expand Up @@ -51,7 +51,7 @@ function verify_and_test(d::UnivariateDistribution, dct::Dict, n_tsamples::Int)
# verify parameters
pdct = dct["params"]
for (fname, val) in pdct
f = eval(symbol(fname))
f = eval(Symbol(fname))
@assert isa(f, Function)
Base.Test.test_approx_eq(f(d), val, "$fname(d)", "val")
end
