Merge pull request #19 from Cody-G/fix_0.5
Fix 0.5
lindahua committed Jan 24, 2017
2 parents 28361a1 + c6d0457 commit 001a22f
Showing 6 changed files with 57 additions and 57 deletions.
4 changes: 2 additions & 2 deletions src/common.jl
@@ -29,8 +29,8 @@ function axpby!{T<:BlasReal}(a::T, x::StridedVector{T}, b::T, y::StridedVector{T
end

gets(x::StridedVector, i::Int) = x[i]
-gets(x::StridedMatrix, i::Int) = view(x,:,i)
-gets{T}(x::StridedArray{T,3}, i::Int) = view(x,:,:,i)
+gets(x::StridedMatrix, i::Int) = Base.view(x,:,i)
+gets{T}(x::StridedArray{T,3}, i::Int) = Base.view(x,:,:,i)

shrink{T<:AbstractFloat}(x::T, t::T) = (x > t ? x - t : x < -t ? x + t : zero(T))
shrink{T<:AbstractFloat}(x::StridedVector{T}, t::T) = T[shrink(v, t) for v in x]
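For context: this is the Julia 0.5 compatibility fix the branch name refers to. Julia 0.5 added `view` to Base, so the unqualified `view` calls (and the `EmpiricalRisks.view` used in the tests) are replaced with explicit `Base.view`, and `DualNumbers` names are qualified after switching from `using` to `import`. A minimal sketch of the `Base.view` behavior that `gets` relies on (the array `A` below is illustrative, not from the source):

```julia
# Base.view returns a SubArray that aliases its parent without copying,
# which is why gets(x::StridedMatrix, i) can hand back column i cheaply.
A = reshape(collect(1.0:6.0), 2, 3)
col = Base.view(A, :, 2)   # 2-element SubArray over column 2
col[1] = 10.0
@assert A[1, 2] == 10.0    # mutating the view mutates the parent
```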
22 changes: 11 additions & 11 deletions src/prediction.jl
@@ -71,15 +71,15 @@ end
function predict{T<:BlasReal}(pm::AffinePred, θ::StridedVector{T}, x::StridedVector{T})
d = pm.dim
@_checkdims length(θ) == d + 1 && length(x) == d
-w = view(θ, 1:d)
+w = Base.view(θ, 1:d)
b = convert(T, pm.bias) * θ[d+1]
dot(w, x) + b
end

function predict{T<:BlasReal}(pm::AffinePred, θ::StridedVector{T}, X::StridedMatrix{T})
d = pm.dim
@_checkdims length(θ) == d + 1 && size(X,1) == d
-w = view(θ, 1:d)
+w = Base.view(θ, 1:d)
b = convert(T, pm.bias) * θ[d+1]
r = X'w;
broadcast!(+, r, r, b)
@@ -90,7 +90,7 @@ function predict!{T<:BlasReal}(r::StridedVector{T}, pm::AffinePred, θ::StridedV
n = size(X,2)
@_checkdims length(θ) == d + 1 && size(X,1) == d && n == length(r)
b = convert(T, pm.bias) * θ[d+1]
-w = view(θ, 1:d)
+w = Base.view(θ, 1:d)
At_mul_B!(r, X, w)
broadcast!(+, r, r, b)
end
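Both `predict` methods compute the same affine map, `dot(w, x) + bias * θ[d+1]`, with the weights read out of `θ` through a view. A hedged sketch with illustrative numbers (θ, X, and the bias value are made up, not from the source):

```julia
# Affine prediction over a d × n sample matrix, as in the methods above.
θ = [0.5, -1.0, 2.0]        # d = 2 weights followed by one bias coefficient
X = [1.0 3.0;
     2.0 4.0]               # each column is one sample
bias = 1.0                  # stands in for pm.bias
w = Base.view(θ, 1:2)       # weight part of θ, no copy
r = X' * w .+ bias * θ[3]   # r == [0.5, -0.5]
```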
@@ -167,8 +167,8 @@ function predict{T<:BlasReal}(pm::MvAffinePred, θ::StridedMatrix{T}, x::Strided
d = pm.dim
k = pm.k
@_checkdims size(θ) == (k,d+1) && length(x) == d
-W = view(θ, :, 1:d)
-b = view(θ, :, d+1)
+W = Base.view(θ, :, 1:d)
+b = Base.view(θ, :, d+1)
r = W * x
axpy!(convert(T, pm.bias), b, r)
r
@@ -178,12 +178,12 @@ function predict{T<:BlasReal}(pm::MvAffinePred, θ::StridedMatrix{T}, X::Strided
d = pm.dim
k = pm.k
@_checkdims size(θ) == (k,d+1) && size(X,1) == d
-W = view(θ, :, 1:d)
-b = view(θ, :, d+1)
+W = Base.view(θ, :, 1:d)
+b = Base.view(θ, :, d+1)
R = W * X
bias = convert(T, pm.bias)
for i = 1:size(X,2)
-axpy!(bias, b, view(R,:,i))
+axpy!(bias, b, Base.view(R,:,i))
end
R
end
@@ -194,12 +194,12 @@ function predict!{T<:BlasReal}(r::StridedMatrix{T}, pm::MvAffinePred, θ::Stride
n = size(X,2)
@_checkdims size(θ) == (k,d+1) && size(X,1) == d && size(r) == (k,n)
bias = convert(T, pm.bias)
-W = view(θ, :, 1:d)
-b = view(θ, :, d+1)
+W = Base.view(θ, :, 1:d)
+b = Base.view(θ, :, d+1)
A_mul_B!(r, W, X)
bias = convert(T, pm.bias)
for i = 1:size(X,2)
-axpy!(bias, b, view(r,:,i))
+axpy!(bias, b, Base.view(r,:,i))
end
r
end
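The multivariate variants do the same thing per output dimension: `R = W * X`, then `bias * b` is accumulated into every column with `axpy!`. A plain-Julia sketch of that column loop (data illustrative, not from the source):

```julia
# Equivalent of the axpy! loop above: add the scaled bias column b
# to each column of R in place.
W = [1.0 0.0; 0.0 1.0]
X = [1.0 2.0; 3.0 4.0]
b = [10.0, 20.0]
bias = 1.0
R = W * X
for i = 1:size(X, 2)
    R[:, i] += bias * b     # same effect as axpy!(bias, b, view(R, :, i))
end
@assert R == [11.0 12.0; 23.0 24.0]
```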
16 changes: 8 additions & 8 deletions src/risks.jl
@@ -107,7 +107,7 @@ function value_and_addgrad!{T<:BlasReal,L<:UnivariateLoss}(rm::SupervisedRiskMod
v, dv = value_and_deriv(loss, p, y)
α_ = convert(T, α)
β_ = convert(T, β)
-axpby!(α_ * dv, x, β_, view(g, 1:d))
+axpby!(α_ * dv, x, β_, Base.view(g, 1:d))
gb = dv * convert(T, pm.bias)
if β == zero(T)
g[d+1] = α_ * gb
@@ -144,7 +144,7 @@ function value_and_addgrad!{T<:BlasReal,L<:UnivariateLoss}(buffer::StridedVecOrM
end
α_ = convert(T, α)
β_ = convert(T, β)
-gemv!('N', α_, x, u, β_, view(g, 1:d))
+gemv!('N', α_, x, u, β_, Base.view(g, 1:d))
gb = convert(T, sum(u) * pm.bias)
if β == zero(T)
g[d+1] = α_ * gb
@@ -192,7 +192,7 @@ function value_and_addgrad!{T<:BlasReal,L<:MultivariateLoss}(buffer::StridedVecO
@assert size(u, 2) == n
v = zero(T)
for i = 1:n
-u_i = view(u,:,i)
+u_i = Base.view(u,:,i)
v_i, _ = value_and_grad!(loss, u_i, u_i, gets(y,i))
v += v_i
end
@@ -215,8 +215,8 @@ function value_and_addgrad!{T<:BlasReal,L<:MultivariateLoss}(rm::SupervisedRiskM
d = inputlen(pm)
α_ = convert(T, α)
β_ = convert(T, β)
-gemm!('N', 'T', α_, u, x, β_, view(g, :, 1:d))
-axpby!(convert(T, α_ * pm.bias), u, β_, view(g, :, d+1))
+gemm!('N', 'T', α_, u, x, β_, Base.view(g, :, 1:d))
+axpby!(convert(T, α_ * pm.bias), u, β_, Base.view(g, :, d+1))
(α_ * v, g)
end

@@ -240,14 +240,14 @@ function value_and_addgrad!{T<:BlasReal,L<:MultivariateLoss}(buffer::StridedVecO
@assert size(u, 2) == n
v = zero(T)
for i = 1:n
-u_i = view(u,:,i)
+u_i = Base.view(u,:,i)
v_i, _ = value_and_grad!(loss, u_i, u_i, gets(y,i))
v += v_i
end
d = inputlen(pm)
α_ = convert(T, α)
β_ = convert(T, β)
-gemm!('N', 'T', α_, u, x, β_, view(g, :, 1:d))
-axpby!(convert(T, α_ * pm.bias), vec(sum(u,2)), β_, view(g, :, d+1))
+gemm!('N', 'T', α_, u, x, β_, Base.view(g, :, 1:d))
+axpby!(convert(T, α_ * pm.bias), vec(sum(u,2)), β_, Base.view(g, :, d+1))
(α_ * v, g)
end
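The accumulation primitives here follow BLAS conventions: `axpby!(a, x, b, y)` (defined in src/common.jl) overwrites `y` with `a*x + b*y`, and `gemm!('N', 'T', α, u, x, β, g)` overwrites `g` with `α * u * x' + β * g`. A plain-Julia sketch of the `axpby!` contract (values illustrative):

```julia
# y ← a*x + b*y, updating y in place, as axpby! does above.
a, b = 2.0, 0.5
x = [1.0, 2.0]
y = [10.0, 20.0]
for i in eachindex(y)
    y[i] = a * x[i] + b * y[i]
end
@assert y == [7.0, 14.0]
```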
10 changes: 5 additions & 5 deletions test/multiloss.jl
@@ -1,6 +1,6 @@
using EmpiricalRisks
using Base.Test
-using DualNumbers
+import DualNumbers

### Auxiliary functions

@@ -22,11 +22,11 @@ function verify_multiloss(loss::MultivariateLoss, f, u::Vector{Float64}, y)
for i = 1:d
_ep = zeros(d)
_ep[i] = 1.0
-_in = dual(u, _ep)
+_in = DualNumbers.dual(u, _ep)
_out = f(_in, y)
-@assert isa(_out, Dual{Float64})
-@assert isapprox(v_r, real(_out))
-g_r[i] = epsilon(_out)
+@assert isa(_out, DualNumbers.Dual{Float64})
+@assert isapprox(v_r, DualNumbers.realpart(_out))
+g_r[i] = DualNumbers.epsilon(_out)
end

# verify
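`verify_multiloss` recovers the gradient one coordinate at a time: seed the dual perturbation along the i-th basis vector and read the i-th partial from `epsilon`. A self-contained toy version of the same pattern (the function `f` and the inputs are illustrative, not from the test suite):

```julia
import DualNumbers

# Coordinate-seeded forward-mode gradient of f(u) = 0.5*‖u‖², whose gradient is u.
f(u) = 0.5 * sum(x -> x * x, u)
u = [1.0, -2.0, 3.0]
g = zeros(3)
for i = 1:3
    e = zeros(3); e[i] = 1.0
    out = f([DualNumbers.dual(u[j], e[j]) for j = 1:3])
    g[i] = DualNumbers.epsilon(out)   # i-th partial derivative
end
@assert g == u
```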
18 changes: 9 additions & 9 deletions test/regularizers.jl
@@ -90,15 +90,15 @@ reg = NonNegReg()
@test prox!(reg, θ, θ, 1.0) == [0. 1. ; 0. 0.]

θ = [0. 0.5 ; 0.5 0.]
-θ1 = EmpiricalRisks.view(θ, :, 2)
+θ1 = Base.view(θ, :, 2)
θ2 = similar(θ1)
@test value(reg, θ1) == 0.
@test prox!(reg, θ2, θ1, 1.0) == [0.5, 0.]

-θ1 = EmpiricalRisks.view(θ, 1, :)
+θ1 = Base.view(θ, 1, :)
θ2 = similar(θ1)
@test value(reg, θ1) == 0.
-@test prox!(reg, θ2, θ1, 1.0) == [0. 0.5]
+@test prox!(reg, θ2, θ1, 1.0) == [0., 0.5]



@@ -122,15 +122,15 @@ reg = SimplexReg(1.0)
@test prox!(reg, θ, θ, 1.0) == [0. 0.5 ; 0.5 0.]

θ = [0. 0.5 ; 0.5 0.]
-θ1 = EmpiricalRisks.view(θ, :, 2)
+θ1 = Base.view(θ, :, 2)
θ2 = similar(θ1)
@test value(reg, θ1) == Inf
@test prox!(reg, θ2, θ1, 1.0) == [0.75, 0.25]

-θ1 = EmpiricalRisks.view(θ, 1, :)
+θ1 = Base.view(θ, 1, :)
θ2 = similar(θ1)
@test value(reg, θ1) == Inf
-@test prox!(reg, θ2, θ1, 1.0) == [0.25 0.75]
+@test prox!(reg, θ2, θ1, 1.0) == [0.25, 0.75]



@@ -154,12 +154,12 @@ reg = L1BallReg(1.0)
@test prox!(reg, θ, θ, 1.0) == [0. 0.5 ; -0.5 0]

θ = [0. 0.5 ; 0.5 0.]
-θ1 = EmpiricalRisks.view(θ, :, 2)
+θ1 = Base.view(θ, :, 2)
θ2 = similar(θ1)
@test value(reg, θ1) == 0.
@test prox!(reg, θ2, θ1, 1.0) == [0.5, 0.]

-θ1 = EmpiricalRisks.view(θ, 1, :)
+θ1 = Base.view(θ, 1, :)
θ2 = similar(θ1)
@test value(reg, θ1) == 0.
-@test prox!(reg, θ2, θ1, 1.0) == [0. 0.5]
+@test prox!(reg, θ2, θ1, 1.0) == [0., 0.5]
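These expected values are consistent with the proximal operators acting as Euclidean projections; for `NonNegReg`, the prox appears to clamp negative entries to zero (an assumption inferred from the tests above, not stated in the diff). A sketch:

```julia
# Assumed semantics of NonNegReg's prox: elementwise projection onto x ≥ 0.
# prox_nonneg is a hypothetical helper, not part of EmpiricalRisks.
prox_nonneg(x) = [max(v, 0.0) for v in x]

@assert prox_nonneg([-1.0, 0.5, 0.0]) == [0.0, 0.5, 0.0]
```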
44 changes: 22 additions & 22 deletions test/uniloss.jl
@@ -1,21 +1,21 @@
using EmpiricalRisks
using Base.Test
-using DualNumbers
+import DualNumbers

### auxiliary functions

function verify_uniloss(loss::UnivariateLoss, f, u::Float64, y::Real)
# verify inferred types
for VT in [Float64, Float32]
-@test Base.return_types(value, Tuple{typeof(loss), VT, VT}) == [VT]
-@test Base.return_types(deriv, Tuple{typeof(loss), VT, VT}) == [VT]
-@test Base.return_types(value_and_deriv, Tuple{typeof(loss), VT, VT}) == [Tuple{VT, VT}]
+@test Base.return_types(EmpiricalRisks.value, Tuple{typeof(loss), VT, VT}) == [VT]
+@test Base.return_types(EmpiricalRisks.deriv, Tuple{typeof(loss), VT, VT}) == [VT]
+@test Base.return_types(EmpiricalRisks.value_and_deriv, Tuple{typeof(loss), VT, VT}) == [Tuple{VT, VT}]
end

# verify computation correctness
-r = f(dual(u, 1.0), y)
-v_r = realpart(r)
-dv_r = epsilon(r)
+r = f(DualNumbers.dual(u, 1.0), y)
+v_r = DualNumbers.realpart(r)
+dv_r = DualNumbers.epsilon(r)

@test_approx_eq v_r value(loss, u, y)
@test_approx_eq dv_r deriv(loss, u, y)
@@ -36,7 +36,7 @@ end

# AbsLoss

-_abs(u::Dual) = realpart(u) == 0.0 ? dual(0.0, 0.0) : abs(u)
+_abs(u::DualNumbers.Dual) = DualNumbers.realpart(u) == 0.0 ? DualNumbers.dual(0.0, 0.0) : DualNumbers.abs(u)

verify_uniloss(AbsLoss(),
(p, y) -> _abs(p - y), -3.0:3.0, -1.0:0.5:1.0)
@@ -48,29 +48,29 @@ verify_uniloss(SqrLoss(),

# QuantileLoss

-function _quanlossf(t::Float64, u::Dual, y)
-realpart(u) > y ? t * (u - y) :
-realpart(u) < y ? (1.0 - t) * (y - u) :
-dual(0.0, 0.0)
+function _quanlossf(t::Float64, u::DualNumbers.Dual, y)
+DualNumbers.realpart(u) > y ? t * (u - y) :
+DualNumbers.realpart(u) < y ? (1.0 - t) * (y - u) :
+DualNumbers.dual(0.0, 0.0)
end

verify_uniloss(QuantileLoss(0.3), (p, y) -> _quanlossf(0.3, p, y), -2.0:0.5:2.0, -1.0:0.5:1.0)
verify_uniloss(QuantileLoss(0.5), (p, y) -> _quanlossf(0.5, p, y), -2.0:0.5:2.0, -1.0:0.5:1.0)

# EpsilonInsLoss

-function _epsinsensf(eps::Float64, u::Dual, y)
-a = abs(realpart(u) - y)
-a > eps ? _abs(u - y) - eps : dual(0.0, 0.0)
+function _epsinsensf(eps::Float64, u::DualNumbers.Dual, y)
+a = abs(DualNumbers.realpart(u) - y)
+a > eps ? _abs(u - y) - eps : DualNumbers.dual(0.0, 0.0)
end

verify_uniloss(EpsilonInsLoss(0.3), (p, y) -> _epsinsensf(0.3, p, y), -2.0:0.25:2.0, -1.0:0.5:1.0)
verify_uniloss(EpsilonInsLoss(0.5), (p, y) -> _epsinsensf(0.5, p, y), -2.0:0.25:2.0, -1.0:0.5:1.0)

# HuberLoss

-function _huberf(h::Float64, u::Dual, y)
-a = abs(realpart(u) - y)
+function _huberf(h::Float64, u::DualNumbers.Dual, y)
+a = abs(DualNumbers.realpart(u) - y)
a > h ? h * _abs(u - y) - 0.5 * h^2 : 0.5 * abs2(u - y)
end

Expand All @@ -79,19 +79,19 @@ verify_uniloss(HuberLoss(0.5), (p, y) -> _huberf(0.5, p, y), -2.0:0.25:2.0, -1.0

# HingeLoss

-_hingef(u::Dual, y) = y * realpart(u) < 1.0 ? 1.0 - y * u : dual(0.0, 0.0)
+_hingef(u::DualNumbers.Dual, y) = y * DualNumbers.realpart(u) < 1.0 ? 1.0 - y * u : DualNumbers.dual(0.0, 0.0)
verify_uniloss(HingeLoss(), _hingef, -2.0:0.5:2.0, [-1.0, 1.0])

# SquaredHingeLoss

-_sqrhingef(u::Dual, y) = y * realpart(u) < 1.0 ? (1.0 - y * u).^2 : dual(0.0, 0.0)
+_sqrhingef(u::DualNumbers.Dual, y) = y * DualNumbers.realpart(u) < 1.0 ? (1.0 - y * u).^2 : DualNumbers.dual(0.0, 0.0)
verify_uniloss(SqrHingeLoss(), _sqrhingef, -2.0:0.5:2.0, [-1.0, 1.0])

# SmoothedHingeLoss

-function _sm_hingef(h::Float64, u::Dual, y)
-yu = y * realpart(u)
-yu >= 1.0 + h ? dual(0.0, 0.0) :
+function _sm_hingef(h::Float64, u::DualNumbers.Dual, y)
+yu = y * DualNumbers.realpart(u)
+yu >= 1.0 + h ? DualNumbers.dual(0.0, 0.0) :
yu <= 1.0 - h ? 1.0 - y * u :
abs2(1.0 + h - y * u) / (4 * h)
end
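The pattern throughout this file: evaluate a reference implementation of each loss on `dual(u, 1.0)`, so `realpart` carries the value and `epsilon` the derivative, then compare both against the package's `value` and `deriv`. A standalone toy version (the squared loss below is the textbook ½(u − y)², written out here rather than taken from the package):

```julia
import DualNumbers

# Forward-mode value and derivative of a reference loss, as verify_uniloss does.
sqrloss(u, y) = 0.5 * (u - y) * (u - y)

u, y = 1.5, 1.0
r  = sqrloss(DualNumbers.dual(u, 1.0), y)  # seed du/du = 1
v  = DualNumbers.realpart(r)               # value: 0.125
dv = DualNumbers.epsilon(r)                # derivative u - y = 0.5
@assert v == 0.125 && dv == 0.5
```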
