Merge pull request #88 from JuliaStats/aa/0.6
Updates for Julia 0.6
ararslan committed Mar 17, 2017
2 parents 10e6623 + 8a748ec commit c44172b
Showing 19 changed files with 364 additions and 352 deletions.
1 change: 0 additions & 1 deletion .travis.yml
@@ -4,7 +4,6 @@ os:
   - linux
   - osx
 julia:
-  - 0.4
   - 0.5
   - nightly
 notifications:
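Note: this change only drops Julia 0.4 from the Travis build matrix; reconstructed from the context lines above, the resulting julia: section of .travis.yml tests 0.5 and nightly:

    os:
      - linux
      - osx
    julia:
      - 0.5
      - nightly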
2 changes: 1 addition & 1 deletion README.md
@@ -3,8 +3,8 @@ HypothesisTests.jl
 
 [![Build Status](https://travis-ci.org/JuliaStats/HypothesisTests.jl.svg?branch=master)](https://travis-ci.org/JuliaStats/HypothesisTests.jl)
 [![Coverage Status](https://coveralls.io/repos/JuliaStats/HypothesisTests.jl/badge.svg?branch=master)](https://coveralls.io/r/JuliaStats/HypothesisTests.jl?branch=master)
-[![HypothesisTests](http://pkg.julialang.org/badges/HypothesisTests_0.4.svg)](http://pkg.julialang.org/?pkg=HypothesisTests)
 [![HypothesisTests](http://pkg.julialang.org/badges/HypothesisTests_0.5.svg)](http://pkg.julialang.org/?pkg=HypothesisTests)
+[![HypothesisTests](http://pkg.julialang.org/badges/HypothesisTests_0.6.svg)](http://pkg.julialang.org/?pkg=HypothesisTests)
 
 This package implements several hypothesis tests in Julia.
 
4 changes: 2 additions & 2 deletions REQUIRE
@@ -1,7 +1,7 @@
-julia 0.4
+julia 0.5
 Distributions 0.10.0
 Roots
 StatsBase 0.9.0
-Compat 0.8.4
+Compat 0.18.0
 Rmath 0.1.2
 Combinatorics
4 changes: 2 additions & 2 deletions src/HypothesisTests.jl
@@ -22,7 +22,7 @@
 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
-VERSION >= v"0.4.0-dev+6521" && __precompile__()
+__precompile__()
 
 module HypothesisTests
 
@@ -33,7 +33,7 @@ using Rmath: pwilcox, psignrank
 import StatsBase.confint
 
 export testname, pvalue, confint
-abstract HypothesisTest
+@compat abstract type HypothesisTest end
 
 check_same_length(x::AbstractVector, y::AbstractVector) = if length(x) != length(y)
     throw(DimensionMismatch("Vectors must be the same length"))
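Note: the hunk above shows the central Julia 0.6 syntax migration in this PR: `abstract Foo` becomes `abstract type Foo end`, and Compat's `@compat` macro lets the new spelling parse on Julia 0.5 as well. A minimal sketch of the pattern (the `MyTest` names are hypothetical, not part of the package):

    using Compat

    # Julia 0.6 spells abstract type declarations as `abstract type T end`
    # (previously `abstract T`); @compat makes the same line work on 0.5 too.
    @compat abstract type MyTest end
    @compat abstract type MyExactTest <: MyTest end

    # Dispatch on the new abstract types works exactly as before.
    describe(::MyTest) = "some hypothesis test"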
4 changes: 2 additions & 2 deletions src/anderson_darling.jl
@@ -2,7 +2,7 @@
 
 export OneSampleADTest, KSampleADTest
 
-abstract ADTest <: HypothesisTest
+@compat abstract type ADTest <: HypothesisTest end
 
 ## ONE SAMPLE AD-TEST
 ### http://www.itl.nist.gov/div898/handbook/eda/section3/eda35e.htm
@@ -101,7 +101,7 @@ function pvalue(x::KSampleADTest)
     @inbounds for i = 1:2, k = 1:5
         A[k,i+1] = A[k,i] * tm[k]
     end
-    f = A \ log(sig)
+    f = A \ log.(sig)
 
     exp(f[1] + f[2]*Tk + f[3]*Tk^2)
 end
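Note: calling `log(sig)` on a vector is deprecated in Julia 0.6; the dot-broadcast form `log.(sig)` is the replacement and computes the element-wise logarithm. A small illustration with made-up values (not taken from the file):

    sig = [0.25, 0.10, 0.05, 0.025, 0.01]  # illustrative significance levels
    logsig = log.(sig)                     # element-wise log, 0.6 broadcast syntax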
71 changes: 36 additions & 35 deletions src/circular.jl
@@ -39,13 +39,13 @@ immutable RayleighTest <: HypothesisTest
     n::Int # number of observations
 end
 function RayleighTest{S <: Complex}(samples::Vector{S})
-    s = @compat Float64(abs(sum(samples./abs(samples))))
+    s = Float64(abs(sum(samples./abs(samples))))
     n = length(samples)
     Rbar = s/n
     RayleighTest(Rbar, n)
 end
 function RayleighTest{S <: Real}(samples::Vector{S})
-    s = @compat Float64(abs(sum(exp(im*samples))))
+    s = Float64(abs(sum(exp, im * samples)))
     n = length(samples)
     Rbar = s/n
     RayleighTest(Rbar, n)
@@ -71,24 +71,24 @@ immutable FisherTLinearAssociation{S <: Real, T <: Real} <: HypothesisTest
     rho_t::Float64 # circular correlation coefficient
     theta::Vector{S} # radians of group 1
     phi::Vector{T} # radians of group 2
-    uniformly_distributed::@compat(Union{Bool,Void}) # is distribution of theta and phi uniform?
+    uniformly_distributed::Union{Bool,Void} # is distribution of theta and phi uniform?
 end
 function FisherTLinearAssociation{Stheta <: Real, Sphi <: Real}(theta::Vector{Stheta},
-    phi::Vector{Sphi}, uniformly_distributed::@compat(Union{Bool,Void}))
+    phi::Vector{Sphi}, uniformly_distributed::Union{Bool,Void})
     check_same_length(theta, phi)
 
-    A = sum(cos(theta).*cos(phi))
-    B = sum(sin(theta).*sin(phi))
-    C = sum(cos(theta).*sin(phi))
-    D = sum(sin(theta).*cos(phi))
+    A = sum(cos.(theta).*cos.(phi))
+    B = sum(sin.(theta).*sin.(phi))
+    C = sum(cos.(theta).*sin.(phi))
+    D = sum(sin.(theta).*cos.(phi))
     T = A*B-C*D
 
     # Notation drawn from Fisher, 1993
     n = length(theta)
-    E = sum(cos(2*theta))
-    F = sum(sin(2*theta))
-    G = sum(cos(2*phi))
-    H = sum(sin(2*phi))
+    E = sum(cos, 2*theta)
+    F = sum(sin, 2*theta)
+    G = sum(cos, 2*phi)
+    H = sum(sin, 2*phi)
     rho_t = 4*T/sqrt((n^2 - E^2 - F^2)*(n^2-G^2-H^2))
     FisherTLinearAssociation(rho_t, theta, phi, uniformly_distributed)
 end
@@ -105,19 +105,19 @@ end
 # For large samples, compute the distribution and statistic of T
 function tlinear_Z(x::FisherTLinearAssociation)
     n = length(x.theta)
-    theta_resultant = sum(exp(im*x.theta))
-    phi_resultant = sum(exp(im*x.phi))
+    theta_resultant = sum(exp, im * x.theta)
+    phi_resultant = sum(exp, im * x.phi)
     theta_resultant_angle = angle(theta_resultant)
-    phi_resultant_angle = angle(phi_resultant)
-    alpha_2_theta = mean(cos(2*(x.theta-theta_resultant_angle)))
-    beta_2_theta = mean(sin(2*(x.theta-theta_resultant_angle)))
-    alpha_2_phi = mean(cos(2*(x.phi-phi_resultant_angle)))
-    beta_2_phi = mean(sin(2*(x.phi-phi_resultant_angle)))
-    U_theta = (1-alpha_2_theta^2-beta_2_theta^2)/2
-    U_phi = (1-alpha_2_phi^2-beta_2_phi^2)/2
-    V_theta = (abs2(theta_resultant)/n^2)*(1-alpha_2_theta)
-    V_phi = (abs2(phi_resultant)/n^2)*(1-alpha_2_phi)
-    sqrt(n)*U_theta*U_phi*rho_t/sqrt(V_theta*V_phi)
+    phi_resultant_angle = angle(phi_resultant)
+    alpha_2_theta = mean(cos, 2 * (x.theta - theta_resultant_angle))
+    beta_2_theta = mean(sin, 2 * (x.theta - theta_resultant_angle))
+    alpha_2_phi = mean(cos, 2 * (x.phi - phi_resultant_angle))
+    beta_2_phi = mean(sin, 2 * (x.phi - phi_resultant_angle))
+    U_theta = (1 - alpha_2_theta^2 - beta_2_theta^2) / 2
+    U_phi = (1-alpha_2_phi^2-beta_2_phi^2)/2
+    V_theta = (abs2(theta_resultant) / n^2) * (1 - alpha_2_theta)
+    V_phi = (abs2(phi_resultant) / n^2) * (1 - alpha_2_phi)
+    return sqrt(n) * U_theta * U_phi * rho_t / sqrt(V_theta * V_phi)
 
     # Alternative computational strategy from Fisher and Lee (1983)
     # a1 = [mean(cos(theta)), mean(cos(phi))]
@@ -141,10 +141,10 @@ function pvalue(x::FisherTLinearAssociation; tail=:both)
         # distribution is uniform, use a permutation test.
 
         # "For n < 25, use a randomisation test based on the quantity T = AB - CD"
-        ct = cos(x.theta)
-        st = sin(x.theta)
-        cp = cos(x.phi)
-        sp = sin(x.phi)
+        ct = cos.(x.theta)
+        st = sin.(x.theta)
+        cp = cos.(x.phi)
+        sp = sin.(x.phi)
         T = sum(ct.*cp)*sum(st.*sp)-sum(ct.*sp)*sum(st.*cp)
         greater = 0
 
@@ -192,16 +192,17 @@ end
 function JammalamadakaCircularCorrelation{S <: Real, T <: Real}(alpha::Vector{S}, beta::Vector{T})
     check_same_length(alpha, beta)
     # calculate sample mean directions
-    alpha_bar = angle(sum(exp(im*alpha)))
-    beta_bar = angle(sum(exp(im*beta)))
-    r = sum(sin(alpha .- alpha_bar).*sin(beta .- beta_bar))/sqrt(sum(sin(alpha .- alpha_bar).^2)*sum(sin(beta .- beta_bar).^2))
+    alpha_bar = angle(sum(exp, im * alpha))
+    beta_bar = angle(sum(exp, im * beta))
+    r = sum(sin.(alpha .- alpha_bar) .* sin.(beta .- beta_bar)) /
+        sqrt(sum(sin.(alpha .- alpha_bar).^2) * sum(sin.(beta .- beta_bar).^2))
 
-    sin2_alpha = sin(alpha .- alpha_bar).^2
-    sin2_beta = sin(beta .- beta_bar).^2
+    sin2_alpha = sin.(alpha .- alpha_bar).^2
+    sin2_beta = sin.(beta .- beta_bar).^2
     lambda_20 = mean(sin2_alpha)
     lambda_02 = mean(sin2_beta)
-    lambda_22 = mean(sin2_alpha.*sin2_beta)
-    Z = sqrt(length(alpha)*lambda_20*lambda_02/lambda_22)*r
+    lambda_22 = mean(sin2_alpha .* sin2_beta)
+    Z = sqrt(length(alpha) * lambda_20 * lambda_02 / lambda_22) * r
 
     JammalamadakaCircularCorrelation(r, Z)
 end
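Note: besides the dot-broadcast rewrites, many of the circular.jl changes switch from reducing a temporary vector to the mapping forms `sum(f, x)` and `mean(f, x)`, which give the same result without allocating an array for the function's output. A sketch with made-up data (not taken from the package):

    x = [0.1, 0.7, 1.3]        # illustrative angles in radians

    s1 = sum(exp.(im .* x))    # broadcast then sum: allocates exp.(im .* x)
    s2 = sum(exp, im .* x)     # mapping form used in this commit: same value,
                               # no intermediate array for the exp results
    m  = mean(cos, 2 .* x)     # mean(f, x) follows the same pattern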
2 changes: 1 addition & 1 deletion src/common.jl
@@ -56,4 +56,4 @@ function tiedrank_adj!(ord::AbstractVector, v::AbstractArray)
     (ord, tieadj)
 end
 
-tiedrank_adj(v::AbstractArray) = tiedrank_adj!(Array(Float64, length(v)), v)
+tiedrank_adj(v::AbstractArray) = tiedrank_adj!(Vector{Float64}(length(v)), v)
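Note: the `Array(Float64, n)` constructor form is deprecated in Julia 0.6; the typed constructor `Vector{Float64}(n)` is the replacement (kruskal_wallis.jl below gets the same change). Both allocate an uninitialized vector, as in this hypothetical snippet:

    n = 4
    v = Vector{Float64}(n)   # 0.5/0.6 spelling; contents are uninitialized
    fill!(v, 0.0)            # initialize before reading, if needed
                             # (Julia 0.7+ later required Vector{Float64}(undef, n))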
6 changes: 3 additions & 3 deletions src/kolmogorov_smirnov.jl
@@ -26,9 +26,9 @@ export
     ExactOneSampleKSTest,
     ApproximateOneSampleKSTest, ApproximateTwoSampleKSTest
 
-abstract KSTest <: HypothesisTest
-abstract ApproximateKSTest <: KSTest
-abstract ExactKSTest <: KSTest
+@compat abstract type KSTest <: HypothesisTest end
+@compat abstract type ApproximateKSTest <: KSTest end
+@compat abstract type ExactKSTest <: KSTest end
 
 population_param_of_interest(x::KSTest) = ("Supremum of CDF differences", 0.0, x.δ) # parameter of interest: name, value under h0, point estimate
 
4 changes: 2 additions & 2 deletions src/kruskal_wallis.jl
@@ -67,7 +67,7 @@ function kwstats{T<:Real}(groups::AbstractVector{T}...)
     C = 1-tieadj/(n^3 - n)
 
     # compute rank sums
-    R_i = Array(Float64, length(groups))
+    R_i = Vector{Float64}(length(groups))
     n_end = 0
     for i=1:length(groups)
         R_i[i] = sum(ranks[n_end+1:n_end+n_i[i]])
@@ -79,4 +79,4 @@ function kwstats{T<:Real}(groups::AbstractVector{T}...)
     H /= C
 
     (H, R_i, C, n_i)
-end
+end
