Skip to content

Commit

Permalink
Code style and API cleanup.
Browse files Browse the repository at this point in the history
API changes:

1. remove `get_parent` (replaced by `Base.parent`),

2. remove `get_transformation` (replaced by the property accessor
`.transformation`); similarly support `.log_density_function`

Code style changes:

Don't import extended methods, more consistent formatting.
  • Loading branch information
tpapp committed Mar 1, 2019
1 parent 1faa96d commit 798fa54
Show file tree
Hide file tree
Showing 5 changed files with 42 additions and 32 deletions.
2 changes: 1 addition & 1 deletion src/AD_Flux.jl
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ Gradient using algorithmic/automatic differentiation via Flux.
"""
ADgradient(::Val{:Flux}, ℓ) = FluxGradientLogDensity(ℓ)

show(io::IO, ∇ℓ::FluxGradientLogDensity) = print(io, "Flux AD wrapper for ", ∇ℓ.ℓ)
Base.show(io::IO, ∇ℓ::FluxGradientLogDensity) = print(io, "Flux AD wrapper for ", ∇ℓ.ℓ)

function logdensity(::Type{ValueGradient}, ∇ℓ::FluxGradientLogDensity, x::RealVector)
@unpack ℓ = ∇ℓ
Expand Down
2 changes: 1 addition & 1 deletion src/AD_ForwardDiff.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ struct ForwardDiffLogDensity{L, C} <: ADGradientWrapper
gradientconfig::C
end

function show(io::IO, ℓ::ForwardDiffLogDensity)
function Base.show(io::IO, ℓ::ForwardDiffLogDensity)
print(io, "ForwardDiff AD wrapper for ", ℓ.ℓ,
", w/ chunk size ", length(ℓ.gradientconfig.seeds))
end
Expand Down
6 changes: 4 additions & 2 deletions src/AD_ReverseDiff.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ struct ReverseDiffLogDensity{L, C} <: ADGradientWrapper
gradientconfig::C
end

show(io::IO, ℓ::ReverseDiffLogDensity) = print(io, "ReverseDiff AD wrapper for ", ℓ.ℓ)
Base.show(io::IO, ℓ::ReverseDiffLogDensity) = print(io, "ReverseDiff AD wrapper for ", ℓ.ℓ)

function logdensity(::Type{ValueGradient}, fℓ::ReverseDiffLogDensity, x::RealVector)
@unpack ℓ, gradientconfig = fℓ
Expand All @@ -20,7 +20,9 @@ struct ReverseDiffTapeLogDensity{L, R, T} <: ADGradientWrapper
compiled_tape::T
end

show(io::IO, ℓ::ReverseDiffTapeLogDensity) = print(io, "ReverseDiff AD wrapper (compiled tape) for ", ℓ.ℓ)
function Base.show(io::IO, ℓ::ReverseDiffTapeLogDensity)
print(io, "ReverseDiff AD wrapper (compiled tape) for ", ℓ.ℓ)
end

function logdensity(::Type{ValueGradient}, fℓ::ReverseDiffTapeLogDensity, x::RealVector)
@unpack result_buffer, compiled_tape = fℓ
Expand Down
58 changes: 33 additions & 25 deletions src/LogDensityProblems.jl
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
module LogDensityProblems

export logdensity, dimension, TransformedLogDensity, get_transformation, get_parent,
reject_logdensity, LogDensityRejectErrors, ADgradient

import Base: eltype, isfinite, isinf, show
export logdensity, dimension, TransformedLogDensity,
reject_logdensity, LogDensityRejectErrors, ADgradient,
get_transformation, get_parent # deprecated

using ArgCheck: @argcheck
using BenchmarkTools: @belapsed
Expand All @@ -13,8 +12,11 @@ using Parameters: @unpack
using Random: AbstractRNG, GLOBAL_RNG
using Requires: @require

using TransformVariables: AbstractTransform, transform_logdensity, RealVector, TransformVariables
import TransformVariables: dimension, random_arg
using TransformVariables: AbstractTransform, transform_logdensity, RealVector,
TransformVariables, dimension, random_reals, random_arg

@deprecate get_parent(transformation) Base.parent(transformation)
@deprecate get_transformation(wrapper) wrapper.transformation

####
#### result types
Expand All @@ -41,7 +43,7 @@ See also [`logdensity`](@ref).
"""
Value(value::T) where {T <: Real} = Value{T}(value)

eltype(::Type{Value{T}}) where T = T
Base.eltype(::Type{Value{T}}) where T = T

struct ValueGradient{T, V <: AbstractVector{T}}
value::T
Expand Down Expand Up @@ -77,11 +79,11 @@ function ValueGradient(value::T1, gradient::AbstractVector{T2}) where {T1,T2}
ValueGradient(T(value), T ≡ T2 ? gradient : map(T, gradient))
end

eltype(::Type{ValueGradient{T,V}}) where {T,V} = T
Base.eltype(::Type{ValueGradient{T,V}}) where {T,V} = T

isfinite(v::Union{Value, ValueGradient}) = isfinite(v.value)
Base.isfinite(v::Union{Value, ValueGradient}) = isfinite(v.value)

isinf(v::Union{Value, ValueGradient}) = isinf(v.value)
Base.isinf(v::Union{Value, ValueGradient}) = isinf(v.value)

"""
$(TYPEDEF)
Expand Down Expand Up @@ -114,6 +116,8 @@ interface for `ℓ::AbstractLogDensityProblem`:
1. [`dimension`](@ref) returns the *dimension* of the domain of `ℓ`,
2. [`logdensity`](@ref) evaluates the log density `ℓ` at a given point.
See also [`LogDensityProblems.stresstest`](@ref) for stress testing.
"""
abstract type AbstractLogDensityProblem end

Expand All @@ -130,26 +134,30 @@ gradient, both returning eponymous types.
function logdensity end

"""
TransformedLogDensity(transformation, logdensityfunction)
TransformedLogDensity(transformation, log_density_function)
A problem in Bayesian inference. Vectors of length `dimension(transformation)` are
transformed into a general object `θ` (unrestricted type, but a named tuple is recommended
for clean code), correcting for the log Jacobian determinant of the transformation.
`logdensityfunction(θ)` is expected to return *real numbers*. For zero densities or infeasible
`θ`s, `-Inf` or similar should be returned, but for efficiency of inference most methods
recommend using `transformation` to avoid this.
It is recommended that `log_density_function` is a callable object that also encapsulates
the data for the problem.
It is recommended that `logdensityfunction` is a callable object that also
encapsulates the data for the problem.
`log_density_function(θ)` is expected to return *real numbers*. For zero densities or
infeasible `θ`s, `-Inf` or similar should be returned, but for efficiency of inference most
methods recommend using `transformation` to avoid this.
Use the property accessors `ℓ.transformation` and `ℓ.log_density_function` to access the
arguments of `ℓ::TransformedLogDensity`, these are part of the API.
"""
struct TransformedLogDensity{T <: AbstractTransform, L} <: AbstractLogDensityProblem
transformation::T
logdensityfunction::L
log_density_function::L
end

show(io::IO, ℓ::TransformedLogDensity) =
function Base.show(io::IO, ℓ::TransformedLogDensity)
print(io, "TransformedLogDensity of dimension $(dimension(ℓ.transformation))")
end

get_transformation(p::TransformedLogDensity) = p.transformation

Expand All @@ -158,12 +166,12 @@ $(SIGNATURES)
The dimension of the problem, ie the length of the vectors in its domain.
"""
dimension(p::TransformedLogDensity) = dimension(p.transformation)
TransformVariables.dimension(p::TransformedLogDensity) = dimension(p.transformation)

function logdensity(::Type{Value}, p::TransformedLogDensity, x::RealVector)
@unpack transformation, logdensityfunction = p
@unpack transformation, log_density_function = p
try
Value(transform_logdensity(transformation, logdensityfunction, x))
Value(transform_logdensity(transformation, log_density_function, x))
catch e
e isa RejectLogDensity || rethrow(e)
Value(-Inf)
Expand All @@ -183,9 +191,9 @@ Implementation detail, *not exported*.
"""
abstract type LogDensityWrapper <: AbstractLogDensityProblem end

get_parent(w::LogDensityWrapper) = w.ℓ
Base.parent(w::LogDensityWrapper) = w.ℓ

dimension(w::LogDensityWrapper) = dimension(get_parent(w))
TransformVariables.dimension(w::LogDensityWrapper) = dimension(parent(w))

####
#### wrappers -- convenience
Expand All @@ -211,7 +219,7 @@ minus_inf_like(::Type{ValueGradient}, x) = ValueGradient(convert(eltype(x), -Inf

function logdensity(kind, w::LogDensityRejectErrors, x)
try
logdensity(kind, get_parent(w), x)
logdensity(kind, parent(w), x)
catch
minus_inf_like(kind, x)
end
Expand Down Expand Up @@ -279,7 +287,7 @@ end
#### stress testing
####

random_arg(ℓ; kwargs...) = TransformVariables.random_reals(dimension(ℓ); kwargs...)
TransformVariables.random_arg(ℓ; kwargs...) = random_reals(dimension(ℓ); kwargs...)

"""
$(SIGNATURES)
Expand Down
6 changes: 3 additions & 3 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,12 @@ end
# a Bayesian problem
p = TransformedLogDensity(t, logposterior)
@test dimension(p) == 1
@test get_transformation(p) ≡ t
@test p.transformation ≡ t

# gradient of a problem
∇p = ADgradient(:ForwardDiff, p)
@test dimension(∇p) == 1
@test get_transformation(get_parent(∇p)) ≡ t
@test parent(∇p).transformation ≡ t

for _ in 1:100
x = random_arg(p)
Expand All @@ -77,7 +77,7 @@ end
∇p = ADgradient(:ForwardDiff, p)

@test dimension(p) == dimension(∇p) == dimension(t)
@test get_transformation(p) ≡ get_transformation(get_parent(∇p)) ≡ t
@test p.transformation ≡ parent(∇p).transformation ≡ t

for _ in 1:100
x = random_arg(p)
Expand Down

0 comments on commit 798fa54

Please sign in to comment.