From 2265004d6ebe902dbe785a6bf47ecabeeac49ab5 Mon Sep 17 00:00:00 2001
From: Christof Stocker
Date: Fri, 3 Aug 2018 17:15:54 +0200
Subject: [PATCH] update more 0.7 deprecations

---
 Project.toml.template      | 20 ++++++++++++++++++++
 src/LossFunctions.jl       |  5 ++---
 src/supervised/distance.jl | 42 +++++++++++++++++++++---------------------
 src/supervised/margin.jl   | 20 ++++++++++----------
 test/runtests.jl           |  7 +++----
 5 files changed, 56 insertions(+), 38 deletions(-)
 create mode 100644 Project.toml.template

diff --git a/Project.toml.template b/Project.toml.template
new file mode 100644
index 0000000..11526ef
--- /dev/null
+++ b/Project.toml.template
@@ -0,0 +1,20 @@
+name = "LossFunctions"
+uuid = "30fc2ffe-d236-52d8-8643-a9d8f7c094a7"
+version = "0.2.1"
+
+[deps]
+InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
+LearnBase = "7f8f8fb0-2700-5f03-b4bd-41f8cfc144b6"
+Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a"
+RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
+
+[extras]
+DualNumbers = "fa6b7ba4-c1ee-5f82-b5fc-ecf0adba8f74"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+Statistics = "10745b16-79ce-11e8-05f9-75db01b31331"
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[targets]
+test = ["DualNumbers", "Test", "Statistics", "Random"]

diff --git a/src/LossFunctions.jl b/src/LossFunctions.jl
index b14bfcf..614f082 100644
--- a/src/LossFunctions.jl
+++ b/src/LossFunctions.jl
@@ -1,14 +1,13 @@
-__precompile__(true)
 
 module LossFunctions
 
 using RecipesBase
 import Base.*
 using Base.Cartesian
 
-using SparseArrays, InteractiveUtils
+using Markdown, SparseArrays, InteractiveUtils
 
-using LearnBase
+using LearnBase
 import LearnBase: value, value!, deriv, deriv2, scaled,
     value_deriv, isminimizable, isdifferentiable,

diff --git a/src/supervised/distance.jl b/src/supervised/distance.jl
index 6ece787..29f6f11 100644
--- a/src/supervised/distance.jl
+++ b/src/supervised/distance.jl
@@ -112,8 +112,8 @@ It is strictly convex.
 const L2DistLoss = LPDistLoss{2}
 
 value(loss::L2DistLoss, difference::Number) = abs2(difference)
-deriv(loss::L2DistLoss, difference::T) where {T<:Number} = T(2) * difference
-deriv2(loss::L2DistLoss, difference::T) where {T<:Number} = T(2)
+deriv(loss::L2DistLoss, difference::T) where {T<:Number} = convert(T,2) * difference
+deriv2(loss::L2DistLoss, difference::T) where {T<:Number} = convert(T,2)
 
 isdifferentiable(::L2DistLoss) = true
 isdifferentiable(::L2DistLoss, at) = true
@@ -204,17 +204,17 @@ function value(loss::HuberLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
     abs_diff = abs(difference)
     if abs_diff <= loss.d
-        return T(0.5)*abs2(difference) # quadratic
+        return convert(T,0.5)*abs2(difference) # quadratic
     else
-        return (loss.d*abs_diff) - T(0.5)*abs2(loss.d) # linear
+        return (loss.d*abs_diff) - convert(T,0.5)*abs2(loss.d) # linear
     end
 end
 function deriv(loss::HuberLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
     if abs(difference) <= loss.d
-        return T(difference) # quadratic
+        return convert(T,difference) # quadratic
     else
-        return loss.d*T(sign(difference)) # linear
+        return loss.d*convert(T,sign(difference)) # linear
     end
 end
 function deriv2(loss::HuberLoss{T1}, difference::T2) where {T1,T2<:Number}
@@ -225,11 +225,11 @@ function value_deriv(loss::HuberLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
     abs_diff = abs(difference)
     if abs_diff <= loss.d
-        val = T(0.5)*abs2(difference)
-        der = T(difference)
+        val = convert(T,0.5)*abs2(difference)
+        der = convert(T,difference)
     else
-        val = (loss.d*abs_diff) - T(0.5)*abs2(loss.d)
-        der = loss.d*T(sign(difference))
+        val = (loss.d*abs_diff) - convert(T,0.5)*abs2(loss.d)
+        der = loss.d*convert(T,sign(difference))
     end
     return val,der
 end
@@ -291,13 +291,13 @@ function value(loss::L1EpsilonInsLoss{T1}, difference::T2) where {T1,T2<:Number}
 end
 function deriv(loss::L1EpsilonInsLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
-    abs(difference) <= loss.ε ? zero(T) : T(sign(difference))
+    abs(difference) <= loss.ε ? zero(T) : convert(T,sign(difference))
 end
 deriv2(loss::L1EpsilonInsLoss{T1}, difference::T2) where {T1,T2<:Number} = zero(promote_type(T1,T2))
 function value_deriv(loss::L1EpsilonInsLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
     absr = abs(difference)
-    absr <= loss.ε ? (zero(T), zero(T)) : (absr - loss.ε, T(sign(difference)))
+    absr <= loss.ε ? (zero(T), zero(T)) : (absr - loss.ε, convert(T,sign(difference)))
 end
 
 issymmetric(::L1EpsilonInsLoss) = true
@@ -356,16 +356,16 @@ end
 function deriv(loss::L2EpsilonInsLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
     absr = abs(difference)
-    absr <= loss.ε ? zero(T) : T(2)*sign(difference)*(absr - loss.ε)
+    absr <= loss.ε ? zero(T) : convert(T,2)*sign(difference)*(absr - loss.ε)
 end
 function deriv2(loss::L2EpsilonInsLoss{T1}, difference::T2) where {T1,T2<:Number}
     T = promote_type(T1,T2)
-    abs(difference) <= loss.ε ? zero(T) : T(2)
+    abs(difference) <= loss.ε ? zero(T) : convert(T,2)
 end
 function value_deriv(loss::L2EpsilonInsLoss{T}, difference::Number) where T
     absr = abs(difference)
     diff = absr - loss.ε
-    absr <= loss.ε ? (zero(T), zero(T)) : (abs2(diff), T(2)*sign(difference)*diff)
+    absr <= loss.ε ? (zero(T), zero(T)) : (abs2(diff), convert(T,2)*sign(difference)*diff)
 end
 
 issymmetric(::L2EpsilonInsLoss) = true
@@ -410,21 +410,21 @@ struct LogitDistLoss <: DistanceLoss end
 function value(loss::LogitDistLoss, difference::Number)
     er = exp(difference)
     T = typeof(er)
-    -log(T(4)) - difference + 2log(one(T) + er)
+    -log(convert(T,4)) - difference + 2log(one(T) + er)
 end
 function deriv(loss::LogitDistLoss, difference::T) where T<:Number
-    tanh(difference / T(2))
+    tanh(difference / convert(T,2))
 end
 function deriv2(loss::LogitDistLoss, difference::Number)
     er = exp(difference)
     T = typeof(er)
-    T(2)*er / abs2(one(T) + er)
+    convert(T,2)*er / abs2(one(T) + er)
 end
 function value_deriv(loss::LogitDistLoss, difference::Number)
     er = exp(difference)
     T = typeof(er)
     er1 = one(T) + er
-    -log(T(4)) - difference + 2log(er1), (er - one(T)) / (er1)
+    -log(convert(T,4)) - difference + 2log(er1), (er - one(T)) / (er1)
 end
 
 issymmetric(::LogitDistLoss) = true
@@ -474,11 +474,11 @@ const PinballLoss = QuantileLoss
 
 function value(loss::QuantileLoss{T1}, diff::T2) where {T1, T2 <: Number}
     T = promote_type(T1, T2)
-    diff * (T(diff > 0) - loss.τ)
+    diff * (convert(T,diff > 0) - loss.τ)
 end
 function deriv(loss::QuantileLoss{T1}, diff::T2) where {T1, T2 <: Number}
     T = promote_type(T1, T2)
-    T(diff > 0) - loss.τ
+    convert(T,diff > 0) - loss.τ
 end
 
 deriv2(::QuantileLoss{T1}, diff::T2) where {T1, T2 <: Number} = zero(promote_type(T1, T2))

diff --git a/src/supervised/margin.jl b/src/supervised/margin.jl
index 153fa83..d8b893a 100644
--- a/src/supervised/margin.jl
+++ b/src/supervised/margin.jl
@@ -221,9 +221,9 @@ It is locally Lipschitz continuous and convex, but not strictly convex.
 struct L2HingeLoss <: MarginLoss end
 
 value(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? zero(T) : abs2(one(T) - agreement)
-deriv(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? zero(T) : T(2) * (agreement - one(T))
-deriv2(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? zero(T) : T(2)
-value_deriv(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? (zero(T), zero(T)) : (abs2(one(T) - agreement), T(2) * (agreement - one(T)))
+deriv(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? zero(T) : convert(T,2) * (agreement - one(T))
+deriv2(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? zero(T) : convert(T,2)
+value_deriv(loss::L2HingeLoss, agreement::T) where {T<:Number} = agreement >= 1 ? (zero(T), zero(T)) : (abs2(one(T) - agreement), convert(T,2) * (agreement - one(T)))
 
 isunivfishercons(::L2HingeLoss) = true
 isdifferentiable(::L2HingeLoss) = true
@@ -334,17 +334,17 @@ It is Lipschitz continuous and convex, but not strictly convex.
 struct ModifiedHuberLoss <: MarginLoss end
 
 function value(loss::ModifiedHuberLoss, agreement::T) where T<:Number
-    agreement >= -1 ? abs2(max(zero(T), one(agreement) - agreement)) : -T(4) * agreement
+    agreement >= -1 ? abs2(max(zero(T), one(agreement) - agreement)) : -convert(T,4) * agreement
 end
 function deriv(loss::ModifiedHuberLoss, agreement::T) where T<:Number
     if agreement >= -1
-        agreement > 1 ? zero(T) : T(2)*agreement - T(2)
+        agreement > 1 ? zero(T) : convert(T,2)*agreement - convert(T,2)
     else
-        -T(4)
+        -convert(T,4)
     end
 end
 function deriv2(loss::ModifiedHuberLoss, agreement::T) where T<:Number
-    agreement < -1 || agreement > 1 ? zero(T) : T(2)
+    agreement < -1 || agreement > 1 ? zero(T) : convert(T,2)
 end
 
 isdifferentiable(::ModifiedHuberLoss) = true
@@ -389,8 +389,8 @@ It is locally Lipschitz continuous and strongly convex.
 struct L2MarginLoss <: MarginLoss end
 
 value(loss::L2MarginLoss, agreement::T) where {T<:Number} = abs2(one(T) - agreement)
-deriv(loss::L2MarginLoss, agreement::T) where {T<:Number} = T(2) * (agreement - one(T))
-deriv2(loss::L2MarginLoss, agreement::T) where {T<:Number} = T(2)
+deriv(loss::L2MarginLoss, agreement::T) where {T<:Number} = convert(T,2) * (agreement - one(T))
+deriv2(loss::L2MarginLoss, agreement::T) where {T<:Number} = convert(T,2)
 
 isunivfishercons(::L2MarginLoss) = true
 isdifferentiable(::L2MarginLoss) = true
@@ -484,7 +484,7 @@ struct SigmoidLoss <: MarginLoss end
 
 value(loss::SigmoidLoss, agreement::Number) = one(agreement) - tanh(agreement)
 deriv(loss::SigmoidLoss, agreement::Number) = -abs2(sech(agreement))
-deriv2(loss::SigmoidLoss, agreement::T) where {T<:Number} = T(2) * tanh(agreement) * abs2(sech(agreement))
+deriv2(loss::SigmoidLoss, agreement::T) where {T<:Number} = convert(T,2) * tanh(agreement) * abs2(sech(agreement))
 
 isunivfishercons(::SigmoidLoss) = true
 isdifferentiable(::SigmoidLoss) = true

diff --git a/test/runtests.jl b/test/runtests.jl
index 5aa91b5..a2264f6 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,7 +1,6 @@
 module LossFunctionsTests
-using LearnBase, LossFunctions, DualNumbers, Statistics, Random, SparseArrays, Test
-using LossFunctions
-
+using LearnBase, LossFunctions, DualNumbers
+using Statistics, Random, SparseArrays, Test
 
 tests = [
     "tst_loss.jl",
@@ -15,7 +14,7 @@ perf = [
 
 
 # for deterministic testing
-srand(1234)
+Random.seed!(1234)
 
 distance_losses = [
     L2DistLoss(), LPDistLoss(2.0), L1DistLoss(), LPDistLoss(1.0),