From c175d7ff1cdf3cea94fce6d87a26d0b62586fd04 Mon Sep 17 00:00:00 2001
From: Tim Holy
Date: Fri, 3 Oct 2025 02:23:28 -0500
Subject: [PATCH] Remove all `@inbounds` annotations

Julia is quite good at eliminating bounds checks on its own, and incorrect
use of `@inbounds` can lead to hard-to-diagnose bugs. There are also cases
where adding it actively hurts performance (e.g.,
https://github.com/JuliaLang/julia/issues/48245). It's much better to allow
the sophisticated analysis performed by inference to make this decision than
it is to reflexively add `@inbounds` everywhere.

Fixes #1265.
---
 perf/mixtures.jl                             | 12 +++---
 perf/samplers.jl                             | 10 +++--
 src/cholesky/lkjcholesky.jl                  | 10 ++---
 src/common.jl                                | 12 +++---
 src/genericrand.jl                           | 20 +++++-----
 src/matrix/wishart.jl                        |  2 +-
 src/mixtures/mixturemodel.jl                 | 12 +++---
 src/multivariate/dirichlet.jl                | 38 +++++++++----------
 src/multivariate/dirichletmultinomial.jl     | 12 +++---
 src/multivariate/multinomial.jl              | 24 ++++++------
 src/multivariate/mvlognormal.jl              | 12 +++---
 src/multivariate/mvnormal.jl                 | 20 +++++-----
 src/multivariates.jl                         | 10 ++---
 src/product.jl                               | 14 +++----
 src/reshaped.jl                              |  8 ++--
 src/samplers/binomial.jl                     |  4 +-
 src/samplers/discretenonparametric.jl        |  2 +-
 src/samplers/multinomial.jl                  | 10 ++---
 src/samplers/poisson.jl                      |  4 +-
 src/samplers/vonmisesfisher.jl               |  6 +--
 src/univariate/continuous/gamma.jl           |  4 +-
 src/univariate/continuous/inversegaussian.jl |  2 +-
 src/univariate/continuous/normal.jl          | 16 ++++----
 src/univariate/discrete/categorical.jl       |  8 ++--
 .../discrete/discretenonparametric.jl        | 16 ++++----
 src/univariate/discrete/poisson.jl           |  4 +-
 src/univariate/discrete/poissonbinomial.jl   | 10 ++---
 src/univariates.jl                           |  4 +-
 test/testutils.jl                            | 10 ++---
 29 files changed, 160 insertions(+), 156 deletions(-)

diff --git a/perf/mixtures.jl b/perf/mixtures.jl
index eafe9acae5..083f5de208 100644
--- a/perf/mixtures.jl
+++ b/perf/mixtures.jl
@@ -9,7 +9,7 @@ function current_master(d::AbstractMixtureModel, x)
     p = probs(d)
     @assert length(p) == K
     v = 0.0
-    @inbounds for i in eachindex(p)
+    for i in eachindex(p)
         pi = p[i]
         if pi > 0.0
             c = component(d, i)
@@ -28,7 +28,7 @@ function improved_version(d, x)
     p = probs(d)
     return sum(enumerate(p)) do (i, pi)
         if pi > 0
-            @inbounds c = component(d, i)
+            c = component(d, i)
             pdf(c, x) * pi
         else
             zero(eltype(p))
@@ -59,7 +59,7 @@ function forloop(d, x)
     ps = probs(d)
     cs = components(d)
     s = zero(eltype(ps))
-    @inbounds for i in eachindex(ps)
+    for i in eachindex(ps)
         if ps[i] > 0
             s += ps[i] * pdf(cs[i], x)
         end
@@ -70,13 +70,13 @@ end
 function indexed_sum_comp(d, x)
     ps = probs(d)
     cs = components(d)
-    @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0)
+    sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0)
 end
 
 function indexed_boolprod(d, x)
     ps = probs(d)
     cs = components(d)
-    @inbounds sum((ps[i] > 0) * (ps[i] * pdf(cs[i], x)) for i in eachindex(ps))
+    sum((ps[i] > 0) * (ps[i] * pdf(cs[i], x)) for i in eachindex(ps))
 end
 
 function indexed_boolprod_noinbound(d, x)
@@ -89,7 +89,7 @@ function sumcomp_cond(d, x)
     ps = probs(d)
     cs = components(d)
     s = zero(eltype(ps))
-    @inbounds sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0)
+    sum(ps[i] * pdf(cs[i], x) for i in eachindex(ps) if ps[i] > 0)
 end
 
 distributions = [
diff --git a/perf/samplers.jl b/perf/samplers.jl
index 5dc5fb2692..e97e245faa 100644
--- a/perf/samplers.jl
+++ b/perf/samplers.jl
@@ -46,9 +46,13 @@ if haskey(ENV, "binomial") && ENV["binomial"] != "skip"
     nvals = haskey(ENV, "CI") ?
[2] : 2 .^ (1:12) pvals = haskey(ENV, "CI") ? [0.3] : [0.3, 0.5, 0.9] for n in nvals, p in pvals - s = ST(n, p) - b = @benchmark rand($mt, $s) - @info "(n,p): $((n,p)), result: $b" + try + s = ST(n, p) + b = @benchmark rand($mt, $s) + @info "(n,p): $((n,p)), result: $b" + catch e + @warn "Failed for (n,p): $((n,p)), error: $e" + end end end end diff --git a/src/cholesky/lkjcholesky.jl b/src/cholesky/lkjcholesky.jl index 3455bf8bb1..2672574a55 100644 --- a/src/cholesky/lkjcholesky.jl +++ b/src/cholesky/lkjcholesky.jl @@ -95,7 +95,7 @@ function insupport(d::LKJCholesky, R::LinearAlgebra.Cholesky) (isreal(factors) && size(factors, 1) == p) || return false iinds, jinds = axes(factors) # check that the diagonal of U'*U or L*L' is all ones - @inbounds if R.uplo === 'U' + if R.uplo === 'U' for (j, jind) in enumerate(jinds) col_iinds = view(iinds, 1:j) sum(abs2, view(factors, col_iinds, jind)) ≈ 1 || return false @@ -247,12 +247,12 @@ function _lkj_cholesky_onion_tri!( β = η + (d - 2)//2 # 1. Initialization w0 = 2 * rand(rng, Beta(β, β)) - 1 - @inbounds if uplo === :L + if uplo === :L A[2, 1] = w0 else A[1, 2] = w0 end - @inbounds A[2, 2] = sqrt(1 - w0^2) + A[2, 2] = sqrt(1 - w0^2) # 2. Loop, each iteration k adds row/column k+1 for k in 2:(d - 1) # (a) @@ -261,11 +261,11 @@ function _lkj_cholesky_onion_tri!( y = rand(rng, Beta(k//2, β)) # (c)-(e) # w is directionally uniform vector of length √y - @inbounds w = @views uplo === :L ? A[k + 1, 1:k] : A[1:k, k + 1] + w = @views uplo === :L ? A[k + 1, 1:k] : A[1:k, k + 1] Random.randn!(rng, w) rmul!(w, sqrt(y) / norm(w)) # normalize so new row/column has unit norm - @inbounds A[k + 1, k + 1] = sqrt(1 - y) + A[k + 1, k + 1] = sqrt(1 - y) end # 3. return A diff --git a/src/common.jl b/src/common.jl index 899cca41d2..4219061001 100644 --- a/src/common.jl +++ b/src/common.jl @@ -235,12 +235,12 @@ See also: [`logpdf`](@ref). ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end - return @inbounds map(Base.Fix1(pdf, d), eachvariate(x, variate_form(typeof(d)))) + return map(Base.Fix1(pdf, d), eachvariate(x, variate_form(typeof(d)))) end end function _pdf(d::Distribution{ArrayLikeVariate{N}}, x::AbstractArray{<:Real,N}) where {N} - return exp(@inbounds logpdf(d, x)) + return exp(logpdf(d, x)) end """ @@ -276,7 +276,7 @@ See also: [`pdf`](@ref), [`gradlogpdf`](@ref). 
ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end - return @inbounds map(Base.Fix1(logpdf, d), eachvariate(x, variate_form(typeof(d)))) + return map(Base.Fix1(logpdf, d), eachvariate(x, variate_form(typeof(d)))) end end @@ -393,7 +393,7 @@ function _pdf!( d::Distribution{<:ArrayLikeVariate}, x::AbstractArray{<:Real}, ) - @inbounds logpdf!(out, d, x) + logpdf!(out, d, x) map!(exp, out, out) return out end @@ -443,7 +443,7 @@ function _logpdf!( d::Distribution{<:ArrayLikeVariate}, x::AbstractArray{<:Real}, ) - @inbounds map!(Base.Fix1(logpdf, d), out, eachvariate(x, variate_form(typeof(d)))) + map!(Base.Fix1(logpdf, d), out, eachvariate(x, variate_form(typeof(d)))) return out end @@ -472,7 +472,7 @@ Base.@propagate_inbounds @inline function loglikelihood( ntuple(i -> size(x, i), Val(N)) == size(d) || throw(DimensionMismatch("inconsistent array dimensions")) end - return @inbounds sum(Base.Fix1(logpdf, d), eachvariate(x, ArrayLikeVariate{N})) + return sum(Base.Fix1(logpdf, d), eachvariate(x, ArrayLikeVariate{N})) end end Base.@propagate_inbounds function loglikelihood( diff --git a/src/genericrand.jl b/src/genericrand.jl index 6b3b213f16..bcc1969d10 100644 --- a/src/genericrand.jl +++ b/src/genericrand.jl @@ -26,13 +26,13 @@ rand(rng::AbstractRNG, s::Sampleable, dim1::Int, moredims::Int...) = # default fallback (redefined for univariate distributions) function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}) - return @inbounds rand!(rng, s, Array{eltype(s)}(undef, size(s))) + return rand!(rng, s, Array{eltype(s)}(undef, size(s))) end # multiple samples function rand(rng::AbstractRNG, s::Sampleable{Univariate}, dims::Dims) out = Array{eltype(s)}(undef, dims) - return @inbounds rand!(rng, sampler(s), out) + return rand!(rng, sampler(s), out) end function rand( rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate}, dims::Dims, @@ -40,16 +40,16 @@ function rand( sz = size(s) ax = map(Base.OneTo, dims) out = [Array{eltype(s)}(undef, sz) for _ in Iterators.product(ax...)] - return @inbounds rand!(rng, sampler(s), out, false) + return rand!(rng, sampler(s), out, false) end # these are workarounds for sampleables that incorrectly base `eltype` on the parameters function rand(rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}) - return @inbounds rand!(rng, sampler(s), Array{float(eltype(s))}(undef, size(s))) + return rand!(rng, sampler(s), Array{float(eltype(s))}(undef, size(s))) end function rand(rng::AbstractRNG, s::Sampleable{Univariate,Continuous}, dims::Dims) out = Array{float(eltype(s))}(undef, dims) - return @inbounds rand!(rng, sampler(s), out) + return rand!(rng, sampler(s), out) end function rand( rng::AbstractRNG, s::Sampleable{<:ArrayLikeVariate,Continuous}, dims::Dims, @@ -57,7 +57,7 @@ function rand( sz = size(s) ax = map(Base.OneTo, dims) out = [Array{float(eltype(s))}(undef, sz) for _ in Iterators.product(ax...)] - return @inbounds rand!(rng, sampler(s), out, false) + return rand!(rng, sampler(s), out, false) end """ @@ -113,7 +113,7 @@ function _rand!( s::Sampleable{<:ArrayLikeVariate}, x::AbstractArray{<:Real}, ) - @inbounds for xi in eachvariate(x, variate_form(typeof(s))) + for xi in eachvariate(x, variate_form(typeof(s))) rand!(rng, s, xi) end return x @@ -125,7 +125,7 @@ Base.@propagate_inbounds function rand!( x::AbstractArray{<:AbstractArray{<:Real,N}}, ) where {N} sz = size(s) - allocate = !all(isassigned(x, i) && size(@inbounds x[i]) == sz for i in eachindex(x)) + allocate = !all(isassigned(x, i) && 
size(x[i]) == sz for i in eachindex(x)) return rand!(rng, s, x, allocate) end @@ -160,11 +160,11 @@ function _rand!( allocate::Bool, ) where {N} if allocate - @inbounds for i in eachindex(x) + for i in eachindex(x) x[i] = rand(rng, s) end else - @inbounds for xi in x + for xi in x rand!(rng, s, xi) end end diff --git a/src/matrix/wishart.jl b/src/matrix/wishart.jl index e8da450060..701df1d5aa 100644 --- a/src/matrix/wishart.jl +++ b/src/matrix/wishart.jl @@ -217,7 +217,7 @@ function _wishart_genA!(rng::AbstractRNG, A::AbstractMatrix, df::Real) T = eltype(A) z = zero(T) axes1 = axes(A, 1) - @inbounds for (j, jdx) in enumerate(axes(A, 2)), (i, idx) in enumerate(axes1) + for (j, jdx) in enumerate(axes(A, 2)), (i, idx) in enumerate(axes1) A[idx, jdx] = if i < j z elseif i > j diff --git a/src/mixtures/mixturemodel.jl b/src/mixtures/mixturemodel.jl index 7f36642900..f0aeebce99 100644 --- a/src/mixtures/mixturemodel.jl +++ b/src/mixtures/mixturemodel.jl @@ -295,7 +295,7 @@ function _mixpdf!(r::AbstractArray, d::AbstractMixtureModel, x) p = probs(d) fill!(r, 0.0) t = Array{eltype(p)}(undef, size(r)) - @inbounds for i in eachindex(p) + for i in eachindex(p) pi = p[i] if pi > 0.0 if d isa UnivariateMixture @@ -321,7 +321,7 @@ function _mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x) n = length(r) Lp = Matrix{eltype(p)}(undef, n, K) m = fill(-Inf, n) - @inbounds for i in eachindex(p) + for i in eachindex(p) pi = p[i] if pi > 0.0 lpri = log(pi) @@ -346,7 +346,7 @@ function _mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x) end fill!(r, 0.0) - @inbounds for i = 1:K + for i = 1:K if p[i] > 0.0 lp_i = view(Lp, :, i) for j = 1:n @@ -355,7 +355,7 @@ function _mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x) end end - @inbounds for j = 1:n + for j = 1:n r[j] = log(r[j]) + m[j] end return r @@ -479,9 +479,9 @@ rand(rng::AbstractRNG, d::MixtureModel{Univariate}) = # multivariate mixture sampler for a vector _rand!(rng::AbstractRNG, s::MixtureSampler{Multivariate}, x::AbstractVector{<:Real}) = - @inbounds rand!(rng, s.csamplers[rand(rng, s.psampler)], x) + rand!(rng, s.csamplers[rand(rng, s.psampler)], x) # if only a single sample is requested, no alias table is created _rand!(rng::AbstractRNG, d::MixtureModel{Multivariate}, x::AbstractVector{<:Real}) = - @inbounds rand!(rng, component(d, rand(rng, d.prior)), x) + rand!(rng, component(d, rand(rng, d.prior)), x) sampler(d::MixtureModel) = MixtureSampler(d) diff --git a/src/multivariate/dirichlet.jl b/src/multivariate/dirichlet.jl index b24980ec98..0717cdfe1e 100644 --- a/src/multivariate/dirichlet.jl +++ b/src/multivariate/dirichlet.jl @@ -95,11 +95,11 @@ function cov(d::Dirichlet) αj = α[j] αjc = αj * c for i in 1:(j-1) - @inbounds C[i,j] = C[j,i] + C[i,j] = C[j,i] end - @inbounds C[j,j] = (α0 - αj) * αjc + C[j,j] = (α0 - αj) * αjc for i in (j+1):k - @inbounds C[i,j] = - α[i] * αjc + C[i,j] = - α[i] * αjc end end @@ -158,7 +158,7 @@ function _rand!(rng::AbstractRNG, d::Union{Dirichlet,DirichletCanon}, x::AbstractVector{<:Real}) for (i, αi) in zip(eachindex(x), d.alpha) - @inbounds x[i] = rand(rng, Gamma(αi)) + x[i] = rand(rng, Gamma(αi)) end lmul!(inv(sum(x)), x) # this returns x end @@ -193,7 +193,7 @@ function suffstats(::Type{<:Dirichlet}, P::AbstractMatrix{Float64}) slogp = zeros(K) for i = 1:n for k = 1:K - @inbounds slogp[k] += log(P[k,i]) + slogp[k] += log(P[k,i]) end end DirichletStats(slogp, n) @@ -211,10 +211,10 @@ function suffstats(::Type{<:Dirichlet}, P::AbstractMatrix{Float64}, slogp = zeros(K) for i = 1:n - @inbounds wi 
= w[i] + wi = w[i] tw += wi for k = 1:K - @inbounds slogp[k] += log(P[k,i]) * wi + slogp[k] += log(P[k,i]) * wi end end DirichletStats(slogp, tw) @@ -229,8 +229,8 @@ function _dirichlet_mle_init2(μ::Vector{Float64}, γ::Vector{Float64}) α0 = 0. for k = 1:K - @inbounds μk = μ[k] - @inbounds γk = γ[k] + μk = μ[k] + γk = γ[k] ak = (μk - γk) / (γk - μk * μk) α0 += ak end @@ -262,12 +262,12 @@ function dirichlet_mle_init(P::AbstractMatrix{Float64}, w::AbstractArray{Float64 tw = 0.0 for i in 1:n - @inbounds wi = w[i] + wi = w[i] tw += wi for k in 1:K pk = P[k, i] - @inbounds μ[k] += pk * wi - @inbounds γ[k] += pk * pk * wi + μ[k] += pk * wi + γ[k] += pk * pk * wi end end @@ -310,12 +310,12 @@ function fit_dirichlet!(elogp::Vector{Float64}, α::Vector{Float64}; iqs = 0. for k = 1:K - @inbounds ak = α[k] - @inbounds g[k] = gk = digam_α0 - digamma(ak) + elogp[k] - @inbounds iq[k] = - 1.0 / trigamma(ak) + ak = α[k] + g[k] = gk = digam_α0 - digamma(ak) + elogp[k] + iq[k] = - 1.0 / trigamma(ak) - @inbounds b += gk * iq[k] - @inbounds iqs += iq[k] + b += gk * iq[k] + iqs += iq[k] agk = abs(gk) if agk > gnorm @@ -327,8 +327,8 @@ function fit_dirichlet!(elogp::Vector{Float64}, α::Vector{Float64}; # update α for k = 1:K - @inbounds α[k] -= (g[k] - b) * iq[k] - @inbounds if α[k] < 1.0e-12 + α[k] -= (g[k] - b) * iq[k] + if α[k] < 1.0e-12 α[k] = 1.0e-12 end end diff --git a/src/multivariate/dirichletmultinomial.jl b/src/multivariate/dirichletmultinomial.jl index eb15990cb2..878467fc27 100644 --- a/src/multivariate/dirichletmultinomial.jl +++ b/src/multivariate/dirichletmultinomial.jl @@ -62,7 +62,7 @@ function var(d::DirichletMultinomial{T}) where T <: Real v = fill(d.n * (d.n + d.α0) / (1 + d.α0), length(d)) p = d.α / d.α0 for i in eachindex(v) - @inbounds v[i] *= p[i] * (1 - p[i]) + v[i] *= p[i] * (1 - p[i]) end v end @@ -71,7 +71,7 @@ function cov(d::DirichletMultinomial{<:Real}) c = d.α * d.α' lmul!(-d.n * (d.n + d.α0) / (d.α0^2 * (1 + d.α0)), c) for (i, vi) in zip(diagind(c), v) - @inbounds c[i] = vi + c[i] = vi end c end @@ -89,7 +89,7 @@ end function _logpdf(d::DirichletMultinomial{S}, x::AbstractVector{T}) where {T<:Real, S<:Real} c = loggamma(S(d.n + 1)) + loggamma(d.α0) - loggamma(d.n + d.α0) for j in eachindex(x) - @inbounds xj, αj = x[j], d.α[j] + xj, αj = x[j], d.α[j] c += loggamma(xj + αj) - loggamma(xj + 1) - loggamma(αj) end c @@ -114,7 +114,7 @@ function suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}) where T<:Real all(ns .== n) || error("Each sample in X should sum to the same value.") d, m = size(x) s = zeros(d, n) - @inbounds for k in 1:n, i in 1:m, j in 1:d + for k in 1:n, i in 1:m, j in 1:d if x[j, i] >= k s[j, k] += 1.0 end @@ -128,7 +128,7 @@ function suffstats(::Type{<:DirichletMultinomial}, x::Matrix{T}, w::Array{Float6 all(ns .== n) || error("Each sample in X should sum to the same value.") d, m = size(x) s = zeros(d, n) - @inbounds for k in 1:n, i in 1:m, j in 1:d + for k in 1:n, i in 1:m, j in 1:d if x[j, i] >= k s[j, k] += w[i] end @@ -140,7 +140,7 @@ function fit_mle(::Type{<:DirichletMultinomial}, ss::DirichletMultinomialStats; k = size(ss.s, 2) α = ones(size(ss.s, 1)) rng = 0.0:(k - 1) - @inbounds for iter in 1:maxiter + for iter in 1:maxiter α_old = copy(α) αsum = sum(α) denom = ss.tw * sum(inv, αsum .+ rng) diff --git a/src/multivariate/multinomial.jl b/src/multivariate/multinomial.jl index c4db44b85b..f365450cab 100644 --- a/src/multivariate/multinomial.jl +++ b/src/multivariate/multinomial.jl @@ -68,7 +68,7 @@ function var(d::Multinomial{T}) where T<:Real v = 
Vector{T}(undef, k)
     for i = 1:k
-        @inbounds p_i = p[i]
+        p_i = p[i]
         v[i] = n * p_i * (1 - p_i)
     end
     v
@@ -83,15 +83,15 @@ function cov(d::Multinomial{T}) where T<:Real
     for j = 1:k
         pj = p[j]
         for i = 1:j-1
-            @inbounds C[i,j] = - n * p[i] * pj
+            C[i,j] = - n * p[i] * pj
         end
-        @inbounds C[j,j] = n * pj * (1-pj)
+        C[j,j] = n * pj * (1-pj)
     end
 
     for j = 1:k-1
         for i = j+1:k
-            @inbounds C[i,j] = C[j,i]
+            C[i,j] = C[j,i]
         end
     end
     C
@@ -137,7 +137,7 @@ function insupport(d::Multinomial, x::AbstractVector{T}) where T<:Real
     length(x) == k || return false
     s = 0.0
     for i = 1:k
-        @inbounds xi = x[i]
+        xi = x[i]
         if !(isinteger(xi) && xi >= 0)
             return false
         end
@@ -154,8 +154,8 @@ function _logpdf(d::Multinomial, x::AbstractVector{T}) where T<:Real
     insupport(d,x) || return -R(Inf)
     s = R(loggamma(n + 1))
     for i = 1:length(p)
-        @inbounds xi = x[i]
-        @inbounds p_i = p[i]
+        xi = x[i]
+        p_i = p[i]
         s -= R(loggamma(R(xi) + 1))
         s += xlogy(xi, p_i)
     end
@@ -189,8 +189,8 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}) where T<:Real
     for j = 1:size(x,2)
         nj = zero(T)
         for i = 1:K
-            @inbounds xi = x[i,j]
-            @inbounds scnts[i] += xi
+            xi = x[i,j]
+            scnts[i] += xi
             nj += xi
         end
 
@@ -213,11 +213,11 @@ function suffstats(::Type{<:Multinomial}, x::Matrix{T}, w::Array{Float64}) where
     for j = 1:size(x,2)
         nj = zero(T)
-        @inbounds wj = w[j]
+        wj = w[j]
         tw += wj
         for i = 1:K
-            @inbounds xi = x[i,j]
-            @inbounds scnts[i] += xi * wj
+            xi = x[i,j]
+            scnts[i] += xi * wj
             nj += xi
         end
 
diff --git a/src/multivariate/mvlognormal.jl b/src/multivariate/mvlognormal.jl
index 1eecd38c2f..046f709493 100644
--- a/src/multivariate/mvlognormal.jl
+++ b/src/multivariate/mvlognormal.jl
@@ -27,7 +27,7 @@ abstract type AbstractMvLogNormal <: ContinuousMultivariateDistribution end
 
 function insupport(::Type{D},x::AbstractVector{T}) where {T<:Real,D<:AbstractMvLogNormal}
     for i=1:length(x)
-      @inbounds 0.0<x[i]<Inf ? continue : (return false)
+      0.0<x[i]<Inf ? continue : (return false)

[patch truncated here: the remaining src/multivariate/mvlognormal.jl hunks and
all hunks for src/multivariate/mvnormal.jl, src/multivariates.jl, and
src/product.jl are missing; the diff resumes mid-hunk in src/reshaped.jl]

     trailingsize = ntuple(i -> size(x, N + i), Val(M - N))
-    return @inbounds loglikelihood(dist, reshape(x, size(dist)..., trailingsize...))
+    return loglikelihood(dist, reshape(x, size(dist)..., trailingsize...))
 end
 
 # sampling
@@ -92,7 +92,7 @@ function _rand!(
     x::AbstractArray{<:Real,N}
 ) where {N}
     dist = d.dist
-    @inbounds rand!(rng, dist, reshape(x, size(dist)))
+    rand!(rng, dist, reshape(x, size(dist)))
     return x
 end
diff --git a/src/samplers/binomial.jl b/src/samplers/binomial.jl
index 340a07424c..efbe30909d 100644
--- a/src/samplers/binomial.jl
+++ b/src/samplers/binomial.jl
@@ -11,9 +11,9 @@ function binompvec(n::Int, p::Float64)
     else
         q = 1.0 - p
         a = p / q
-        @inbounds pv[1] = pk = q ^ n
+        pv[1] = pk = q ^ n
         for k = 1:n
-            @inbounds pv[k+1] = (pk *= ((n - k + 1) / k) * a)
+            pv[k+1] = (pk *= ((n - k + 1) / k) * a)
         end
     end
     return pv
diff --git a/src/samplers/discretenonparametric.jl b/src/samplers/discretenonparametric.jl
index ce964a3ef8..747eb03bdb 100644
--- a/src/samplers/discretenonparametric.jl
+++ b/src/samplers/discretenonparametric.jl
@@ -20,4 +20,4 @@ DiscreteNonParametricSampler(support::S, probs::AbstractVector{<:Real}
     DiscreteNonParametricSampler{T,S}(support, probs)
 
 rand(rng::AbstractRNG, s::DiscreteNonParametricSampler) =
-    (@inbounds v = s.support[rand(rng, s.aliastable)]; v)
+    (v = s.support[rand(rng, s.aliastable)]; v)
diff --git a/src/samplers/multinomial.jl b/src/samplers/multinomial.jl
index 7ce8b89730..eed35494ac 100644
--- a/src/samplers/multinomial.jl
+++ b/src/samplers/multinomial.jl
@@ -10,10 +10,10 @@ function multinom_rand!(rng::AbstractRNG, n::Int, p::AbstractVector{<:Real},
 
     while i < km1 && n > 0
         i += 1
-        @inbounds pi = p[i]
+        pi = p[i]
         if pi < rp
             xi =
rand(rng, Binomial(n, Float64(pi / rp))) - @inbounds x[i] = xi + x[i] = xi n -= xi rp -= pi else @@ -21,18 +21,18 @@ function multinom_rand!(rng::AbstractRNG, n::Int, p::AbstractVector{<:Real}, # from Binomial. Just assign remaining counts # to xi. - @inbounds x[i] = n + x[i] = n n = 0 # rp = 0.0 (no need for this, as rp is no longer needed) end end if i == km1 - @inbounds x[k] = n + x[k] = n else # n must have been zero z = zero(eltype(x)) for j = i+1 : k - @inbounds x[j] = z + x[j] = z end end diff --git a/src/samplers/poisson.jl b/src/samplers/poisson.jl index 64898ec2fe..7e6871ac40 100644 --- a/src/samplers/poisson.jl +++ b/src/samplers/poisson.jl @@ -2,9 +2,9 @@ function poissonpvec(μ::Float64, n::Int) # Poisson probabilities, from 0 to n pv = Vector{Float64}(undef, n+1) - @inbounds pv[1] = p = exp(-μ) + pv[1] = p = exp(-μ) for i = 1:n - @inbounds pv[i+1] = (p *= (μ / i)) + pv[i+1] = (p *= (μ / i)) end return pv end diff --git a/src/samplers/vonmisesfisher.jl b/src/samplers/vonmisesfisher.jl index fd3eb2df08..ee7db538bf 100644 --- a/src/samplers/vonmisesfisher.jl +++ b/src/samplers/vonmisesfisher.jl @@ -32,14 +32,14 @@ function _rand!(rng::AbstractRNG, spl::VonMisesFisherSampler, x::AbstractVector) p = spl.p x[1] = w s = 0.0 - @inbounds for i = 2:p + for i = 2:p x[i] = xi = randn(rng) s += abs2(xi) end # normalize x[2:p] r = sqrt((1.0 - abs2(w)) / s) - @inbounds for i = 2:p + for i = 2:p x[i] *= r end @@ -94,7 +94,7 @@ function _vmf_householder_vec(μ::Vector{Float64}) s = sqrt(-2*v[1]) v[1] /= s - @inbounds for i in 2:p + for i in 2:p v[i] = μ[i] / s end diff --git a/src/univariate/continuous/gamma.jl b/src/univariate/continuous/gamma.jl index 866255fb7d..83a80b455b 100644 --- a/src/univariate/continuous/gamma.jl +++ b/src/univariate/continuous/gamma.jl @@ -153,8 +153,8 @@ function suffstats(::Type{<:Gamma}, x::AbstractArray{T}, w::AbstractArray{Float6 slogx = zero(T) tw = zero(T) for i in eachindex(x, w) - @inbounds xi = x[i] - @inbounds wi = w[i] + xi = x[i] + wi = w[i] sx += wi * xi slogx += wi * log(xi) tw += wi diff --git a/src/univariate/continuous/inversegaussian.jl b/src/univariate/continuous/inversegaussian.jl index b585ec3b3d..c3a5661b42 100644 --- a/src/univariate/continuous/inversegaussian.jl +++ b/src/univariate/continuous/inversegaussian.jl @@ -196,7 +196,7 @@ function suffstats(::Type{<:InverseGaussian}, x::AbstractVector{<:Real}, w::Abst sx = zero(T) sinvx = zero(T) sw = zero(T) - @inbounds @simd for i in eachindex(x) + @simd for i in eachindex(x) sx += w[i]*x[i] sinvx += w[i]/x[i] sw += w[i] diff --git a/src/univariate/continuous/normal.jl b/src/univariate/continuous/normal.jl index ef2b77bb58..33fc2bb48c 100644 --- a/src/univariate/continuous/normal.jl +++ b/src/univariate/continuous/normal.jl @@ -138,14 +138,14 @@ function suffstats(::Type{<:Normal}, x::AbstractArray{T}) where T<:Real # compute s s = zero(T) + zero(T) for i in eachindex(x) - @inbounds s += x[i] + s += x[i] end m = s / n # compute s2 s2 = zero(m) for i in eachindex(x) - @inbounds s2 += abs2(x[i] - m) + s2 += abs2(x[i] - m) end NormalStats(s, m, s2, n) @@ -158,8 +158,8 @@ function suffstats(::Type{<:Normal}, x::AbstractArray{T}, w::AbstractArray{Float tw = 0.0 s = 0.0 * zero(T) for i in eachindex(x, w) - @inbounds wi = w[i] - @inbounds s += wi * x[i] + wi = w[i] + s += wi * x[i] tw += wi end m = s / tw @@ -167,7 +167,7 @@ function suffstats(::Type{<:Normal}, x::AbstractArray{T}, w::AbstractArray{Float # compute s2 s2 = zero(m) for i in eachindex(x, w) - @inbounds s2 += w[i] * abs2(x[i] - m) + s2 += 
w[i] * abs2(x[i] - m) end NormalStats(s, m, s2, tw) @@ -189,7 +189,7 @@ function suffstats(g::NormalKnownMu, x::AbstractArray{T}) where T<:Real μ = g.μ s2 = zero(T) + zero(μ) for i in eachindex(x) - @inbounds s2 += abs2(x[i] - μ) + s2 += abs2(x[i] - μ) end NormalKnownMuStats(g.μ, s2, length(x)) end @@ -199,8 +199,8 @@ function suffstats(g::NormalKnownMu, x::AbstractArray{T}, w::AbstractArray{Float s2 = 0.0 * abs2(zero(T) - zero(μ)) tw = 0.0 for i in eachindex(x, w) - @inbounds wi = w[i] - @inbounds s2 += abs2(x[i] - μ) * wi + wi = w[i] + s2 += abs2(x[i] - μ) * wi tw += wi end NormalKnownMuStats(g.μ, s2, tw) diff --git a/src/univariate/discrete/categorical.jl b/src/univariate/discrete/categorical.jl index 1ac338f6f0..64b850efca 100644 --- a/src/univariate/discrete/categorical.jl +++ b/src/univariate/discrete/categorical.jl @@ -70,7 +70,7 @@ function median(d::Categorical{T}) where {T<:Real} i = 0 while cp < 1/2 && i <= k i += 1 - @inbounds cp += p[i] + cp += p[i] end i end @@ -127,7 +127,7 @@ end function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}) where T<:Integer for i = 1 : length(x) - @inbounds xi = x[i] + xi = x[i] h[xi] += 1. # cannot use @inbounds, as no guarantee that x[i] is in bound end h @@ -139,8 +139,8 @@ function add_categorical_counts!(h::Vector{Float64}, x::AbstractArray{T}, w::Abs throw(DimensionMismatch("Inconsistent array lengths.")) end for i = 1 : n - @inbounds xi = x[i] - @inbounds wi = w[i] + xi = x[i] + wi = w[i] h[xi] += wi # cannot use @inbounds, as no guarantee that x[i] is in bound end h diff --git a/src/univariate/discrete/discretenonparametric.jl b/src/univariate/discrete/discretenonparametric.jl index 8e1eefab6e..3715cbbc1f 100644 --- a/src/univariate/discrete/discretenonparametric.jl +++ b/src/univariate/discrete/discretenonparametric.jl @@ -81,7 +81,7 @@ function rand(rng::AbstractRNG, d::DiscreteNonParametric) cp = p[1] i = 1 while cp <= draw && i < n - @inbounds cp += p[i +=1] + cp += p[i +=1] end return x[i] end @@ -122,11 +122,11 @@ function cdf(d::DiscreteNonParametric, x::Real) stop_idx = searchsortedlast(support(d), x) s = zero(P) if stop_idx < div(n, 2) - @inbounds for i in 1:stop_idx + for i in 1:stop_idx s += ps[i] end else - @inbounds for i in (stop_idx + 1):n + for i in (stop_idx + 1):n s += ps[i] end s = 1 - s @@ -148,12 +148,12 @@ function ccdf(d::DiscreteNonParametric, x::Real) stop_idx = searchsortedlast(support(d), x) s = zero(P) if stop_idx < div(n, 2) - @inbounds for i in 1:stop_idx + for i in 1:stop_idx s += ps[i] end s = 1 - s else - @inbounds for i in (stop_idx + 1):n + for i in (stop_idx + 1):n s += ps[i] end end @@ -170,7 +170,7 @@ function quantile(d::DiscreteNonParametric, q::Real) cp = p[1] while cp < q && i < k #Note: is i < k necessary? i += 1 - @inbounds cp += p[i] + cp += p[i] end x[i] end @@ -256,7 +256,7 @@ function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}) where { ps[1] += 1. xprev = x[1] - @inbounds for i = 2:N + for i = 2:N xi = x[i] if xi != xprev n += 1 @@ -292,7 +292,7 @@ function suffstats(::Type{<:DiscreteNonParametric}, x::AbstractArray{T}, ps[1] += w[1] xprev = x[1] - @inbounds for i = 2:N + for i = 2:N xi = x[i] wi = w[i] if xi != xprev diff --git a/src/univariate/discrete/poisson.jl b/src/univariate/discrete/poisson.jl index b794fe7a3b..11205238ba 100644 --- a/src/univariate/discrete/poisson.jl +++ b/src/univariate/discrete/poisson.jl @@ -124,8 +124,8 @@ function suffstats(::Type{<:Poisson}, x::AbstractArray{T}, w::AbstractArray{Floa sx = 0. tw = 0. 
for i in eachindex(x, w) - @inbounds wi = w[i] - @inbounds sx += x[i] * wi + wi = w[i] + sx += x[i] * wi tw += wi end PoissonStats(sx, tw) diff --git a/src/univariate/discrete/poissonbinomial.jl b/src/univariate/discrete/poissonbinomial.jl index 0db82f07bd..66ace32cbb 100644 --- a/src/univariate/discrete/poissonbinomial.jl +++ b/src/univariate/discrete/poissonbinomial.jl @@ -153,7 +153,7 @@ end function poissonbinomial_pdf(p) S = zeros(eltype(p), length(p) + 1) S[1] = 1 - @inbounds for (col, p_col) in enumerate(p) + for (col, p_col) in enumerate(p) q_col = 1 - p_col for row in col:(-1):1 S[row + 1] = q_col * S[row + 1] + p_col * S[row] @@ -199,7 +199,7 @@ end function _dft(x::Vector{T}) where T n = length(x) y = zeros(complex(float(T)), n) - @inbounds for j = 0:n-1, k = 0:n-1 + for j = 0:n-1, k = 0:n-1 y[k+1] += x[j+1] * cis(-π * float(T)(2 * mod(j * k, n)) / n) end return y @@ -222,10 +222,10 @@ sampler(d::PoissonBinomial) = PoissBinAliasSampler(d) function poissonbinomial_pdf_partialderivatives(p::AbstractVector{<:Real}) n = length(p) A = zeros(eltype(p), n, n + 1) - @inbounds for j in 1:n + for j in 1:n A[j, end] = 1 end - @inbounds for (i, pi) in enumerate(p) + for (i, pi) in enumerate(p) qi = 1 - pi for k in (n - i + 1):n kp1 = k + 1 @@ -243,7 +243,7 @@ function poissonbinomial_pdf_partialderivatives(p::AbstractVector{<:Real}) A[j, end] *= pi end end - @inbounds for j in 1:n, i in 1:n + for j in 1:n, i in 1:n A[i, j] -= A[i, j+1] end return A diff --git a/src/univariates.jl b/src/univariates.jl index b60e5a2949..dfc56430c1 100644 --- a/src/univariates.jl +++ b/src/univariates.jl @@ -96,7 +96,7 @@ function insupport!(r::AbstractArray, d::Union{D,Type{D}}, X::AbstractArray) whe length(r) == length(X) || throw(DimensionMismatch("Inconsistent array dimensions.")) for i in 1 : length(X) - @inbounds r[i] = insupport(d, X[i]) + r[i] = insupport(d, X[i]) end return r end @@ -144,7 +144,7 @@ end function _rand!(rng::AbstractRNG, sampler::Sampleable{Univariate}, A::AbstractArray{<:Real}) for i in eachindex(A) - @inbounds A[i] = rand(rng, sampler) + A[i] = rand(rng, sampler) end return A end diff --git a/test/testutils.jl b/test/testutils.jl index d1e215745a..46737e06d1 100644 --- a/test/testutils.jl +++ b/test/testutils.jl @@ -11,7 +11,7 @@ import ForwardDiff function _linspace(a::Float64, b::Float64, n::Int) intv = (b - a) / (n - 1) r = Vector{Float64}(undef, n) - @inbounds for i = 1:n + for i = 1:n r[i] = a + (i-1) * intv end r[n] = b @@ -171,7 +171,7 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable cnts = zeros(Int, m) cnts_sc = zeros(Int, m) for i = 1:n - @inbounds si = samples[i] + si = samples[i] if rmin <= si <= rmax cnts[si - rmin + 1] += 1 else @@ -179,7 +179,7 @@ function test_samples(s::Sampleable{Univariate, Discrete}, # the sampleable throw(DomainError(si, "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax].")) end - @inbounds si_sc = samples3[i] + si_sc = samples3[i] if rmin <= si_sc <= rmax cnts_sc[si_sc - rmin + 1] += 1 else @@ -297,10 +297,10 @@ function test_samples(s::Sampleable{Univariate, Continuous}, # the sampleable # check whether all samples are in the valid range for i = 1:n - @inbounds si = samples[i] + si = samples[i] vmin <= si <= vmax || throw(DomainError(si, "sample generated by `rand(s, n)` is out of valid range [$vmin, $vmax].")) - @inbounds si_sc = samples3[i] + si_sc = samples3[i] vmin <= si_sc <= vmax || throw(DomainError(si, "sample generated by `[rand(s) for _ in 1:n]` is out of valid range [$vmin, $vmax].")) 
end
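
Not part of the patch: a micro-benchmark sketch of the claim in the commit
message. It assumes BenchmarkTools is available (the perf/ scripts already
use it); `sumloop` and `sumloop_inbounds` are illustrative names, not
functions from this package. When a loop is written against `eachindex`, the
compiler can prove every access is in bounds and elide the checks on its
own, so both versions should benchmark essentially identically:

    using BenchmarkTools

    # `eachindex(x)` yields provably in-bounds indices, so the bounds
    # checks can be elided even without `@inbounds`.
    function sumloop(x)
        s = zero(eltype(x))
        for i in eachindex(x)
            s += x[i]
        end
        return s
    end

    # The same kernel with the annotation this patch removes everywhere.
    function sumloop_inbounds(x)
        s = zero(eltype(x))
        @inbounds for i in eachindex(x)
            s += x[i]
        end
        return s
    end

    x = randn(10_000)
    @btime sumloop($x)
    @btime sumloop_inbounds($x)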