update whole codebase to 0.6
Evizero committed Apr 2, 2017
1 parent 086367e commit 5af1495
Showing 18 changed files with 139 additions and 175 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -3,7 +3,7 @@ os:
  - linux
  - osx
julia:
-  - 0.5
+#  - 0.6
  - nightly
notifications:
email: false
4 changes: 2 additions & 2 deletions REQUIRE
@@ -1,4 +1,4 @@
-julia 0.5
+julia 0.6-
StatsBase 0.8.0
-LearnBase 0.1.2 0.2.0
+LearnBase 0.1.3 0.2.0
RecipesBase
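Note: in the REQUIRE format a trailing `-` on a version, as in `julia 0.6-`, lowers the bound to include pre-release builds, so the package remains installable on 0.6 release candidates and nightlies before 0.6 is final.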
4 changes: 2 additions & 2 deletions appveyor.yml
@@ -1,7 +1,7 @@
environment:
  matrix:
-  - JULIAVERSION: "julialang/bin/winnt/x86/0.5/julia-0.5-latest-win32.exe"
-  - JULIAVERSION: "julialang/bin/winnt/x64/0.5/julia-0.5-latest-win64.exe"
+#  - JULIAVERSION: "julialang/bin/winnt/x86/0.6/julia-0.6-latest-win32.exe"
+#  - JULIAVERSION: "julialang/bin/winnt/x64/0.6/julia-0.6-latest-win64.exe"
  - JULIAVERSION: "julianightlies/bin/winnt/x86/julia-latest-win32.exe"
  - JULIAVERSION: "julianightlies/bin/winnt/x64/julia-latest-win64.exe"

4 changes: 0 additions & 4 deletions src/LossFunctions.jl
@@ -1,5 +1,4 @@
__precompile__(true)

module LossFunctions

using RecipesBase
@@ -65,8 +64,6 @@ include("supervised/weightedbinary.jl")
include("supervised/other.jl")
include("supervised/io.jl")

include("deprecate.jl")

# allow using some special losses as function
(loss::ScaledSupervisedLoss)(args...) = value(loss, args...)
(loss::WeightedBinaryLoss)(args...) = value(loss, args...)
@@ -82,4 +79,3 @@ for T in union(subtypes(DistanceLoss), subtypes(MarginLoss))
end

end # module
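With the call overloads kept above, any distance or margin loss instance can be used like a plain function. A minimal usage sketch (assuming the exports of this package):

```julia
using LossFunctions

loss = L2DistLoss()
value(loss, 0.5)   # 0.25
loss(0.5)          # same result, via the (loss::...)(args...) overloads above
```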

17 changes: 8 additions & 9 deletions src/averagemode.jl
@@ -1,4 +1,4 @@
-abstract AverageMode
+abstract type AverageMode end

module AvgMode

@@ -14,25 +14,24 @@ module AvgMode
WeightedMean,
WeightedSum

-immutable None <: AverageMode end
-immutable Sum <: AverageMode end
-immutable Macro <: AverageMode end
-immutable Mean <: AverageMode end
-typealias Micro Mean
+struct None <: AverageMode end
+struct Sum <: AverageMode end
+struct Macro <: AverageMode end
+struct Mean <: AverageMode end
+const Micro = Mean

-immutable WeightedMean{T<:WeightVec} <: AverageMode
+struct WeightedMean{T<:WeightVec} <: AverageMode
weights::T
normalize::Bool
end
-WeightedMean(A::AbstractVector, normalize::Bool) = WeightedMean(weights(A), normalize)
+WeightedMean(A::AbstractVector; normalize::Bool = true) = WeightedMean(weights(A), normalize)

-immutable WeightedSum{T<:WeightVec} <: AverageMode
+struct WeightedSum{T<:WeightVec} <: AverageMode
weights::T
normalize::Bool
end
-WeightedSum(A::AbstractVector, normalize::Bool) = WeightedSum(weights(A), normalize)
+WeightedSum(A::AbstractVector; normalize::Bool = false) = WeightedSum(weights(A), normalize)

end
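A short sketch of how the reworked constructors are called; note the differing defaults (`normalize = true` for `WeightedMean`, `false` for `WeightedSum`):

```julia
# hypothetical usage of the AvgMode types defined above
w = [1.0, 2.0, 3.0]
avg1 = AvgMode.WeightedMean(w)                   # normalize defaults to true
avg2 = AvgMode.WeightedSum(w, normalize = true)  # override the false default
```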

1 change: 0 additions & 1 deletion src/common.jl
@@ -7,4 +7,3 @@ end
macro _dimcheck(condition)
:(($(esc(condition))) || throw(DimensionMismatch("Dimensions of the parameters don't match: $($(string(condition)))")))
end
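For illustration, a hypothetical call site for the `@_dimcheck` macro above (the surrounding function is made up):

```julia
# throws a DimensionMismatch naming the failed condition
function checked_sum(targets::AbstractVector, outputs::AbstractVector)
    @_dimcheck length(targets) == length(outputs)
    sum(abs, targets .- outputs)
end
```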

13 changes: 0 additions & 13 deletions src/deprecate.jl

This file was deleted.

42 changes: 19 additions & 23 deletions src/supervised/distance.jl
@@ -7,9 +7,7 @@ iff `P > 1`.
``L(r) = |r|^P``
"""
-immutable LPDistLoss{P} <: DistanceLoss
-LPDistLoss() = typeof(P) <: Number ? new() : error()
-end
+struct LPDistLoss{P} <: DistanceLoss end

LPDistLoss(p::Number) = LPDistLoss{p}()
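Since the exponent `P` is a type parameter, the old runtime check in the inner constructor has no 0.6 counterpart; the value is simply baked into the type. A sketch:

```julia
l = LPDistLoss(1.5)       # via the outer constructor above
l === LPDistLoss{1.5}()   # true; both are instances of the same singleton type
```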

@@ -68,7 +66,7 @@ It is Lipschitz continuous and convex, but not strictly convex.
ŷ - y ŷ - y
```
"""
-typealias L1DistLoss LPDistLoss{1}
+const L1DistLoss = LPDistLoss{1}

sumvalue(loss::L1DistLoss, difference::AbstractArray) = sumabs(difference)
value(loss::L1DistLoss, difference::Number) = abs(difference)
@@ -111,7 +109,7 @@ It is strictly convex.
ŷ - y ŷ - y
```
"""
-typealias L2DistLoss LPDistLoss{2}
+const L2DistLoss = LPDistLoss{2}

value(loss::L2DistLoss, difference::Number) = abs2(difference)
deriv{T<:Number}(loss::L2DistLoss, difference::T) = T(2) * difference
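The aliases behave exactly like `LPDistLoss{1}` and `LPDistLoss{2}` respectively; a quick sketch of the definitions above:

```julia
value(L1DistLoss(), -3.0)   # abs(-3.0)  = 3.0
value(L2DistLoss(), -3.0)   # abs2(-3.0) = 9.0
deriv(L2DistLoss(), -3.0)   # 2 * -3.0   = -6.0
```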
@@ -126,7 +124,6 @@ isconvex(::L2DistLoss) = true
isstrictlyconvex(::L2DistLoss) = true
isstronglyconvex(::L2DistLoss) = true


# ===========================================================

doc"""
@@ -136,11 +133,11 @@ Measures distance on a circle of specified circumference `c`.
``L(r) = 1 - \cos \left( \frac{2 r \pi}{c} \right)``
"""
-immutable PeriodicLoss{T<:AbstractFloat} <: DistanceLoss
+struct PeriodicLoss{T<:AbstractFloat} <: DistanceLoss
k::T # k = 2π/circumference
-function PeriodicLoss(circ::T)
+function (::Type{PeriodicLoss{T}}){T}(circ::T)
circ > 0 || error("circumference should be strictly positive")
-new(convert(T, 2π/circ))
+new{T}(convert(T, 2π/circ))
end
end
PeriodicLoss{T<:AbstractFloat}(circ::T=1.0) = PeriodicLoss{T}(circ)
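The `(::Type{PeriodicLoss{T}}){T}(...)` method above defines the inner constructor with an explicit method type parameter. On 0.6 the same constructor could also be written with the new `where` syntax; a sketch, not part of this commit:

```julia
struct PeriodicLoss{T<:AbstractFloat} <: DistanceLoss
    k::T   # k = 2π/circumference
    function PeriodicLoss{T}(circ::T) where T
        circ > 0 || error("circumference should be strictly positive")
        new{T}(convert(T, 2π/circ))
    end
end
```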
@@ -193,11 +190,11 @@ It is Lipschitz continuous and convex, but not strictly convex.
ŷ - y ŷ - y
```
"""
-type HuberLoss{T<:AbstractFloat} <: DistanceLoss
+struct HuberLoss{T<:AbstractFloat} <: DistanceLoss
d::T # boundary between quadratic and linear loss
-function HuberLoss(d::T)
+function (::Type{HuberLoss{T}}){T}(d::T)
d > 0 || error("Huber crossover parameter must be strictly positive.")
-new(d)
+new{T}(d)
end
end
HuberLoss{T<:AbstractFloat}(d::T=1.0) = HuberLoss{T}(d)
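As a quick numeric check of the migrated `HuberLoss` (a sketch, assuming the usual Huber form with crossover `d`):

```julia
h = HuberLoss(1.0)
value(h, 0.5)   # quadratic region: 0.5 * 0.5^2 = 0.125
value(h, 3.0)   # linear region: d * (|r| - d/2) = 1.0 * (3.0 - 0.5) = 2.5
deriv(h, 3.0)   # clipped to d * sign(r) = 1.0
```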
@@ -276,15 +273,15 @@ It is Lipschitz continuous and convex, but not strictly convex.
ŷ - y ŷ - y
```
"""
-immutable L1EpsilonInsLoss{T<:AbstractFloat} <: DistanceLoss
+struct L1EpsilonInsLoss{T<:AbstractFloat} <: DistanceLoss
ε::T

-@inline function L1EpsilonInsLoss(ɛ::T)
+function (::Type{L1EpsilonInsLoss{T}}){T}(ɛ::T)
ɛ > 0 || error("ɛ must be strictly positive")
-new(ɛ)
+new{T}(ɛ)
end
end
-typealias EpsilonInsLoss L1EpsilonInsLoss
+const EpsilonInsLoss = L1EpsilonInsLoss
@inline L1EpsilonInsLoss{T<:AbstractFloat}(ε::T) = L1EpsilonInsLoss{T}(ε)
@inline L1EpsilonInsLoss(ε::Number) = L1EpsilonInsLoss{Float64}(Float64(ε))

@@ -341,12 +338,12 @@ larger deviances quadratically. It is convex, but not strictly convex.
ŷ - y ŷ - y
```
"""
-immutable L2EpsilonInsLoss{T<:AbstractFloat} <: DistanceLoss
+struct L2EpsilonInsLoss{T<:AbstractFloat} <: DistanceLoss
ε::T

function L2EpsilonInsLoss::Number)
function (::Type{L2EpsilonInsLoss{T}}){T}::T)
ɛ > 0 || error("ɛ must be strictly positive")
-new(convert(T, ɛ))
+new{T}(ɛ)
end
end
L2EpsilonInsLoss{T<:AbstractFloat}(ε::T) = L2EpsilonInsLoss{T}(ε)
@@ -408,7 +405,7 @@ It is strictly convex and Lipschitz continuous.
ŷ - y ŷ - y
```
"""
-immutable LogitDistLoss <: DistanceLoss end
+struct LogitDistLoss <: DistanceLoss end

function value(loss::LogitDistLoss, difference::Number)
er = exp(difference)
@@ -469,11 +466,11 @@ Furthermore it is symmetric if and only if `τ = 1/2`.
ŷ - y ŷ - y
```
"""
-immutable QuantileLoss{T <: AbstractFloat} <: DistanceLoss
+struct QuantileLoss{T <: AbstractFloat} <: DistanceLoss
τ::T
end

-typealias PinballLoss QuantileLoss
+const PinballLoss = QuantileLoss

function value{T1, T2 <: Number}(loss::QuantileLoss{T1}, diff::T2)
T = promote_type(T1, T2)
@@ -495,4 +492,3 @@ islipschitzcont_deriv(::QuantileLoss) = true
isconvex(::QuantileLoss) = true
isstrictlyconvex(::QuantileLoss) = false
isstronglyconvex(::QuantileLoss) = false

1 change: 0 additions & 1 deletion src/supervised/io.jl
@@ -63,4 +63,3 @@ end
end
end
end

33 changes: 16 additions & 17 deletions src/supervised/margin.jl
@@ -33,7 +33,7 @@ surrogate loss, such as one of those listed below.
y * h(x) y * h(x)
```
"""
-immutable ZeroOneLoss <: MarginLoss end
+struct ZeroOneLoss <: MarginLoss end

deriv(loss::ZeroOneLoss, target::Number, output::Number) = zero(output)
deriv2(loss::ZeroOneLoss, target::Number, output::Number) = zero(output)
@@ -82,7 +82,7 @@ It is Lipschitz continuous and convex, but not strictly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable PerceptronLoss <: MarginLoss end
+struct PerceptronLoss <: MarginLoss end

value{T<:Number}(loss::PerceptronLoss, agreement::T) = max(zero(T), -agreement)
deriv{T<:Number}(loss::PerceptronLoss, agreement::T) = agreement >= 0 ? zero(T) : -one(T)
@@ -126,7 +126,7 @@ times differentiable, strictly convex, and Lipschitz continuous.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable LogitMarginLoss <: MarginLoss end
+struct LogitMarginLoss <: MarginLoss end
value(loss::LogitMarginLoss, agreement::Number) = log1p(exp(-agreement))
deriv(loss::LogitMarginLoss, agreement::Number) = -one(agreement) / (one(agreement) + exp(agreement))
deriv2(loss::LogitMarginLoss, agreement::Number) = (eᵗ = exp(agreement); eᵗ / abs2(one(eᵗ) + eᵗ))
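For reference, these definitions follow from differentiating the loss: with `L(a) = ln(1 + exp(-a))` one gets `L'(a) = -exp(-a) / (1 + exp(-a)) = -1 / (1 + exp(a))` and `L''(a) = exp(a) / (1 + exp(a))^2`, which is exactly what `deriv` and `deriv2` compute.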
@@ -171,8 +171,8 @@ It is Lipschitz continuous and convex, but not strictly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable L1HingeLoss <: MarginLoss end
-typealias HingeLoss L1HingeLoss
+struct L1HingeLoss <: MarginLoss end
+const HingeLoss = L1HingeLoss

value{T<:Number}(loss::L1HingeLoss, agreement::T) = max(zero(T), one(T) - agreement)
deriv{T<:Number}(loss::L1HingeLoss, agreement::T) = agreement >= 1 ? zero(T) : -one(T)
@@ -218,7 +218,7 @@ It is locally Lipschitz continuous and convex, but not strictly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable L2HingeLoss <: MarginLoss end
+struct L2HingeLoss <: MarginLoss end

value{T<:Number}(loss::L2HingeLoss, agreement::T) = agreement >= 1 ? zero(T) : abs2(one(T) - agreement)
deriv{T<:Number}(loss::L2HingeLoss, agreement::T) = agreement >= 1 ? zero(T) : T(2) * (agreement - one(T))
@@ -264,12 +264,12 @@ It is Lipschitz continuous and convex, but not strictly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable SmoothedL1HingeLoss{T<:AbstractFloat} <: MarginLoss
+struct SmoothedL1HingeLoss{T<:AbstractFloat} <: MarginLoss
gamma::T

function SmoothedL1HingeLoss::T)
function (::Type{SmoothedL1HingeLoss{T}}){T}::T)
γ > 0 || error("γ must be strictly positive")
-new(γ)
+new{T}(γ)
end
end
SmoothedL1HingeLoss{T<:AbstractFloat}::T) = SmoothedL1HingeLoss{T}(γ)
@@ -331,7 +331,7 @@ It is Lipschitz continuous and convex, but not strictly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable ModifiedHuberLoss <: MarginLoss end
+struct ModifiedHuberLoss <: MarginLoss end

function value{T<:Number}(loss::ModifiedHuberLoss, agreement::T)
agreement >= -1 ? abs2(max(zero(T), one(agreement) - agreement)) : -T(4) * agreement
@@ -386,7 +386,7 @@ It is locally Lipschitz continuous and strongly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable L2MarginLoss <: MarginLoss end
+struct L2MarginLoss <: MarginLoss end

value{T<:Number}(loss::L2MarginLoss, agreement::T) = abs2(one(T) - agreement)
deriv{T<:Number}(loss::L2MarginLoss, agreement::T) = T(2) * (agreement - one(T))
@@ -433,7 +433,7 @@ convex, but not clipable.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable ExpLoss <: MarginLoss end
+struct ExpLoss <: MarginLoss end

value(loss::ExpLoss, agreement::Number) = exp(-agreement)
deriv(loss::ExpLoss, agreement::Number) = -exp(-agreement)
@@ -480,7 +480,7 @@ differentiable, Lipschitz continuous but nonconvex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable SigmoidLoss <: MarginLoss end
+struct SigmoidLoss <: MarginLoss end

value(loss::SigmoidLoss, agreement::Number) = one(agreement) - tanh(agreement)
deriv(loss::SigmoidLoss, agreement::Number) = -abs2(sech(agreement))
@@ -527,11 +527,11 @@ It is Lipschitz continuous and convex, but not strictly convex.
y ⋅ ŷ y ⋅ ŷ
```
"""
-immutable DWDMarginLoss{T<:AbstractFloat} <: MarginLoss
+struct DWDMarginLoss{T<:AbstractFloat} <: MarginLoss
q::T
-function DWDMarginLoss(q::T)
+function (::Type{DWDMarginLoss{T}}){T}(q::T)
q > 0 || error("q must be strictly positive")
-new(q)
+new{T}(q)
end
end
DWDMarginLoss{T<:AbstractFloat}(q::T) = DWDMarginLoss{T}(q)
@@ -568,4 +568,3 @@ isstronglyconvex(::DWDMarginLoss) = false
isfishercons(::DWDMarginLoss) = true
isunivfishercons(::DWDMarginLoss) = true
isclipable(::DWDMarginLoss) = false
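A short usage sketch for the migrated margin losses, where the argument is the agreement `y ⋅ ŷ`:

```julia
hinge = HingeLoss()        # const alias for L1HingeLoss defined above
value(hinge, -0.5)         # 1 - (-0.5) = 1.5
value(hinge, 2.0)          # 0.0, correct side of the margin

dwd = DWDMarginLoss(2.0)   # q is validated to be strictly positive
```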

8 changes: 3 additions & 5 deletions src/supervised/other.jl
@@ -1,13 +1,11 @@
# ===============================================================

doc"""
PoissonLoss <: SupervisedLoss
Loss under a Poisson noise distribution (KL-divergence)
``L(target, output) = exp(output) - target*output``
"""
-immutable PoissonLoss <: SupervisedLoss end
+struct PoissonLoss <: SupervisedLoss end

value(loss::PoissonLoss, target::Number, output::Number) = exp(output) - target*output
deriv(loss::PoissonLoss, target::Number, output::Number) = exp(output) - target
@@ -36,8 +34,8 @@ Cross-entropy loss also known as log loss and logistic loss is defined as:
``L(target, output) = - target*ln(output) - (1-target)*ln(1-output)``
"""

-immutable CrossentropyLoss <: SupervisedLoss end
-typealias LogitProbLoss CrossentropyLoss
+struct CrossentropyLoss <: SupervisedLoss end
+const LogitProbLoss = CrossentropyLoss

function value(loss::CrossentropyLoss, target::Number, output::Number)
target >= 0 && target <=1 || error("target must be in [0,1]")
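A numeric sketch for `PoissonLoss` as defined above:

```julia
p = PoissonLoss()
value(p, 2.0, 1.0)   # exp(1.0) - 2.0 * 1.0 ≈ 0.718
deriv(p, 2.0, 1.0)   # exp(1.0) - 2.0 ≈ 0.718 (derivative w.r.t. the output)
```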
