InceptionTime Model for Time Series #256

Merged 12 commits on Sep 27, 2022

5 changes: 2 additions & 3 deletions FastTimeSeries/Project.toml
@@ -4,6 +4,7 @@ authors = ["FluxML Community"]
 version = "0.1.0"
 
 [deps]
+ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
 DataDeps = "124859b0-ceae-595e-8997-d05f6a7a8dfe"
 FastAI = "5d0beca9-ade8-49ae-ad0b-a3cf890e669f"
 FilePathsBase = "48062228-2e41-5def-b9a4-89aafe57970f"
@@ -13,7 +14,6 @@ MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
-Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
 
 [compat]
 DataDeps = "0.7"
@@ -23,5 +23,4 @@ Flux = "0.12, 0.13"
 InlineTest = "0.2"
 MLUtils = "0.2"
 UnicodePlots = "2, 3"
-julia = "1.6"
-Zygote = "0.6"
+julia = "1.6"
2 changes: 2 additions & 0 deletions FastTimeSeries/src/FastTimeSeries.jl
@@ -27,12 +27,14 @@ using FilePathsBase
 using InlineTest
 using Statistics
 using UnicodePlots
+using Flux
 
 # Blocks
 include("blocks/timeseriesrow.jl")
 
 # Encodings
 include("encodings/tspreprocessing.jl")
+include("encodings/continuouspreprocessing.jl")
 
 # Models
 include("models/Models.jl")
7 changes: 0 additions & 7 deletions FastTimeSeries/src/container.jl
@@ -1,10 +1,3 @@
-#= TODO: loadfile
-
-elseif endswith(file, ".ts")
-    return _ts2df(file)
-
-=#
-
 Datasets.loadfile(file::String, ::Val{:ts}) = _ts2df(file)
 
 #TimeSeriesDataset
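
This change routes `.ts` files through the datasets loader, so a UCR/UEA archive file can be read directly. For instance (file name assumed; any `.ts` file works the same way):

```julia
# Parse a UCR/UEA-style .ts file through the new loadfile method.
df = Datasets.loadfile("ECG5000_TRAIN.ts", Val(:ts))
```
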
15 changes: 15 additions & 0 deletions FastTimeSeries/src/encodings/continuouspreprocessing.jl
@@ -0,0 +1,15 @@
+struct ContinuousPreprocessing <: Encoding
+    numlabels::Int
+end
+
+ContinuousPreprocessing() = ContinuousPreprocessing(1)
+
+decodedblock(c::ContinuousPreprocessing, block::AbstractArray) = Continuous(c.numlabels)
+
+function encode(::ContinuousPreprocessing, _, block::Continuous, obs)
+    return [obs]
+end
+
+function decode(::ContinuousPreprocessing, _, block::AbstractArray, obs)
+    return obs[1]
+end
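
The encoding wraps a scalar regression target in a length-1 vector for the model and unwraps it on decode. A minimal round trip, with `nothing` standing in for the context argument:

```julia
enc = ContinuousPreprocessing()                   # one regression target

v = encode(enc, nothing, Continuous(1), 3.2f0)    # Float32[3.2], the model-facing form
decode(enc, nothing, v, v)                        # 3.2f0, back to a scalar
```
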
8 changes: 5 additions & 3 deletions FastTimeSeries/src/models.jl
@@ -7,9 +7,9 @@ Construct a model for time-series classification.
 function blockmodel(inblock::TimeSeriesRow,
                     outblock::OneHotTensor{0},
                     backbone)
-    data = rand(Float32, inblock.nfeatures, 32, inblock.obslength)
-    # data = [rand(Float32, inblock.nfeatures, 32) for _ ∈ 1:inblock.obslength]
+    data = zeros(Float32, inblock.nfeatures, 1, 1)
     output = backbone(data)
+    Flux.reset!(backbone)
     return Models.RNNModel(backbone, outsize = length(outblock.classes), recout = size(output, 1))
 end
@@ -24,4 +24,6 @@ end
 
 # ## Tests
 
-@testset "blockbackbone" begin @test_nowarn FastAI.blockbackbone(TimeSeriesRow(1,140)) end
+@testset "blockbackbone" begin
+    @test_nowarn FastAI.blockbackbone(TimeSeriesRow(1,140))
+end
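
The dummy forward pass exists only to read off the backbone's output width, so a single zero observation of shape `(nfeatures, 1, 1)` suffices, and `Flux.reset!` clears the hidden state the probe leaves behind. A sketch of the same probing trick (layer sizes are illustrative):

```julia
using Flux

backbone = LSTM(1, 32)              # 1 input feature, 32 hidden units
probe = zeros(Float32, 1, 1, 1)     # (features, batch, time)
recout = size(backbone(probe), 1)   # 32, inferred rather than hard-coded
Flux.reset!(backbone)               # discard the probe's hidden state
```
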
88 changes: 88 additions & 0 deletions FastTimeSeries/src/models/InceptionTime.jl
@@ -0,0 +1,88 @@
+"""
+    InceptionModule(ni::Int, nf::Int, kernel_size::Int = 40, bottleneck::Bool = true)
+
+An InceptionModule consists of an (optional) bottleneck, followed by three parallel
+Conv1d layers and a max-pool branch, concatenated along the channel dimension.
+"""
+function InceptionModule(ni::Int, nf::Int, kernel_size::Int = 40, bottleneck::Bool = true)
+    ks = [kernel_size ÷ (2^i) for i in 0:2]
+    ks = [ks[i] % 2 == 0 ? ks[i] - 1 : ks[i] for i in 1:3] # ensure odd kernel sizes
+    bottleneck = ni > 1 ? bottleneck : false
+
+    bottleneck_block = bottleneck ? Conv1d(ni, nf, 1, bias = false) : identity
+
+    convs_layers =
+        [Conv1d(bottleneck ? nf : ni, nf, ks[i], bias = false) for i in 1:3]
+
+    convs = Chain(bottleneck_block, Parallel(hcat, convs_layers...))
+
+    maxconvpool = Chain(MaxPool((3,), pad = 1, stride = 1), Conv1d(ni, nf, 1, bias = false))
+
+    return Chain(Parallel(hcat, convs, maxconvpool), BatchNorm(nf * 4, relu))
+end
+
+"""
+    InceptionBlock(ni::Int, nf::Int = 32, residual::Bool = true, depth::Int = 6)
+
+An InceptionBlock consists of a variable number of InceptionModules, determined by
+`depth`. When `residual` is true, a shortcut connection is added around every group
+of three modules.
+"""
+function InceptionBlock(ni::Int, nf::Int = 32, residual::Bool = true, depth::Int = 6)
+    inception = []
+    shortcut = []
+
+    for d in 1:depth
+        push!(inception, InceptionModule(d == 1 ? ni : nf * 4, nf))
+        if residual && d % 3 == 0
+            n_in = d == 3 ? ni : nf * 4
+            n_out = nf * 4
+            skip =
+                n_in == n_out ? BatchNorm(n_out) :
+                Chain(Conv1d(n_in, n_out, 1), BatchNorm(n_out))
+            push!(shortcut, skip)
+        end
+    end
+
+    # Group the modules into chains of three and wire each group's shortcut in parallel.
+    blocks = []
+    d = 1
+
+    while d <= depth
+        blk = []
+        while d <= depth
+            push!(blk, inception[d])
+            if d % 3 == 0
+                d += 1
+                break
+            end
+            d += 1
+        end
+        if residual && d ÷ 3 <= length(shortcut)
+            skp = shortcut[d÷3]
+            push!(blocks, Parallel(+, Chain(blk...), skp))
+        else
+            push!(blocks, Chain(blk...))
+        end
+    end
+    return Chain(blocks...)
+end
+
+# (features, time, batch) -> (time, features, batch), the layout Flux's Conv expects.
+changedims(X) = permutedims(X, (2, 1, 3))
+
+"""
+    InceptionTime(c_in::Int, c_out::Int, nf::Int = 32)
+
+A Julia implementation of the InceptionTime model from
+https://arxiv.org/abs/1909.04939.
+
+## Arguments
+
+- `c_in` : The number of input channels.
+- `c_out`: The number of output classes.
+- `nf`   : The number of "hidden channels" to use.
+"""
+function InceptionTime(c_in::Int, c_out::Int, nf::Int = 32)
+    inceptionblock = InceptionBlock(c_in, nf)
+    gap = GAP1d(1)
+    fc = Dense(nf * 4, c_out)
+    return Chain(changedims, inceptionblock, gap, fc)
+end
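
A quick shape check of the whole stack under the package's `(features, time, batch)` batch layout (sizes chosen to mimic ECG5000: 1 channel, 140 timesteps):

```julia
using Flux

model = InceptionTime(1, 5)      # 1 input channel, 5 classes, nf = 32
x = rand(Float32, 1, 140, 16)    # (features, time, batch)

# changedims     -> (140, 1, 16), Flux's (width, channels, batch)
# InceptionBlock -> (140, 128, 16), each module emits nf * 4 channels
# GAP1d(1)       -> (128, 16)
# Dense(128, 5)  -> (5, 16)
size(model(x))                   # (5, 16)
```
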
7 changes: 5 additions & 2 deletions FastTimeSeries/src/models/Models.jl
@@ -6,10 +6,13 @@ using Flux
 using Zygote
 using DataDeps
 using InlineTest
+using ChainRulesCore
 
-include("StackedLSTM.jl")
+# include("StackedLSTM.jl")
 include("layers.jl")
 include("RNN.jl")
+include("InceptionTime.jl")
 
-export StackedLSTM, RNNModel
+export StackedLSTM, RNNModel, InceptionTime
 
 end
14 changes: 5 additions & 9 deletions FastTimeSeries/src/models/RNN.jl
@@ -1,7 +1,4 @@
-function tabular2rnn(X::AbstractArray{Float32, 3})
-    X = permutedims(X, (1, 3, 2))
-    return X
-end
+tabular2rnn(X::AbstractArray{<:AbstractFloat, 3}) = permutedims(X, (1, 3, 2))
 
 struct RNNModel{A, B}
     recbackbone::A
@@ -22,16 +19,15 @@ is passed through a dropout layer before a 'finalclassifier' block.
 - `dropout_rate`: Dropout probability for the dropout layer.
 """
 
-function RNNModel(recbackbone;
-                  outsize,
-                  recout,
-                  kwargs...)
+function RNNModel(recbackbone; outsize, recout)
     return RNNModel(recbackbone, Dense(recout, outsize))
 end
 
 function (m::RNNModel)(X)
     X = tabular2rnn(X)
-    Flux.reset!(m.recbackbone)
+    ChainRulesCore.ignore_derivatives() do
+        Flux.reset!(m.recbackbone)
+    end
     X = m.recbackbone(X)[:, :, end]
     return m.finalclassifier(X)
 end
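
Wrapping `Flux.reset!` in `ChainRulesCore.ignore_derivatives` clears the recurrent state before each batch without asking the AD to differentiate the in-place mutation. A sketch of the forward pass (layer sizes are illustrative):

```julia
using Flux

m = RNNModel(LSTM(1, 32); outsize = 5, recout = 32)
x = rand(Float32, 1, 140, 16)    # (features, time, batch)

# tabular2rnn -> (1, 16, 140); the last time slice of the recurrent
# output, (32, 16), feeds the Dense classifier.
size(m(x))                       # (5, 16)
```
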
26 changes: 0 additions & 26 deletions FastTimeSeries/src/models/StackedLSTM.jl

This file was deleted. (The `StackedLSTM` implementation moves into `layers.jl`, below.)

36 changes: 36 additions & 0 deletions FastTimeSeries/src/models/layers.jl
@@ -6,4 +6,40 @@ Create a Global Adaptive Pooling + Flatten layer.
 function GAP1d(output_size::Int)
     gap = AdaptiveMeanPool((output_size,))
     Chain(gap, Flux.flatten)
 end
+
+"""
+    StackedLSTM(c_in, c_out, hiddensize, layers)
+
+Stacked LSTM network. Feeds the data through a chain of LSTM layers, where the hidden
+state of the previous layer gets fed to the next one. The first layer corresponds to
+`LSTM(c_in, hiddensize)`, the hidden layers to `LSTM(hiddensize, hiddensize)`, and the
+final layer to `LSTM(hiddensize, c_out)`. Takes the keyword argument `init` for the
+initialization of the layers.
+"""
+function StackedLSTM(c_in::Int, c_out::Integer, hiddensize::Integer, layers::Integer;
+                     init = Flux.glorot_uniform)
+    if layers == 1
+        return Chain(LSTM(c_in, c_out; init = init))
+    elseif layers == 2
+        return Chain(LSTM(c_in, hiddensize; init = init),
+                     LSTM(hiddensize, c_out; init = init))
+    else
+        chain_vec = [LSTM(c_in, hiddensize; init = init)]
+        for i = 1:layers - 2
+            push!(chain_vec, LSTM(hiddensize, hiddensize; init = init))
+        end
+        return Chain(chain_vec..., LSTM(hiddensize, c_out; init = init))
+    end
+end
+
+# 1D convolution with "same" padding for odd kernel sizes; stride, dilation,
+# and bias are forwarded to Flux.Conv.
+function Conv1d(ni, nf, ks; stride = 1, dilation = 1, bias = true)
+    return Conv(
+        (ks,),
+        ni => nf;
+        stride = stride,
+        pad = ks ÷ 2 * dilation,
+        dilation = dilation,
+        bias = bias,
+    )
+end
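
A quick shape check of the two helpers (sizes are arbitrary):

```julia
using Flux

net = StackedLSTM(3, 8, 16, 3)          # LSTM(3=>16), LSTM(16=>16), LSTM(16=>8)
size(net(rand(Float32, 3, 4)))          # (8, 4) for a (features, batch) slice

conv = Conv1d(3, 32, 9)                 # odd kernel, so width is preserved
size(conv(rand(Float32, 140, 3, 4)))    # (140, 32, 4)
```
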
4 changes: 0 additions & 4 deletions FastTimeSeries/src/recipes.jl
@@ -18,9 +18,7 @@ function Datasets.recipeblocks(recipe::TimeSeriesDatasetRecipe)
         return Tuple{TimeSeriesRow, Continuous}
     end
 end
-# Datasets.recipeblocks(::Type{TimeSeriesDatasetRecipe}) = Tuple{TimeSeriesRow, Label}
 
-#TODO: Add Check if test_file is nothing.
 function Datasets.loadrecipe(recipe::TimeSeriesDatasetRecipe, path)
     path = convert(String, path)
     datasetpath_train = joinpath(path, recipe.train_file)
@@ -47,7 +45,6 @@ function Datasets.loadrecipe(recipe::TimeSeriesDatasetRecipe, path)
 end
 
 # Registering recipes
-
 const RECIPES = Dict{String,Vector{Datasets.DatasetRecipe}}(
     "ecg5000" => [
         TimeSeriesDatasetRecipe(train_file="ECG5000_TRAIN.ts", test_file="ECG5000_TEST.ts")
@@ -58,7 +55,6 @@ const RECIPES = Dict{String,Vector{Datasets.DatasetRecipe}}(
     "natops" => [
        TimeSeriesDatasetRecipe(train_file="NATOPS_TEST.ts", test_file="NATOPS_TRAIN.ts")
     ],
-    #! TODO.
     "appliances_energy" => [
         TimeSeriesDatasetRecipe(train_file="AppliancesEnergy_TRAIN.ts", test_file="AppliancesEnergy_TEST.ts", regression = true)
     ]
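
With the recipe registered, the regression dataset should be reachable through FastAI's dataset registry like any other entry; roughly (assuming the `datarecipes()`/`load` registry API):

```julia
using FastAI

data, blocks = load(datarecipes()["appliances_energy"])
```
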
2 changes: 2 additions & 0 deletions FastTimeSeries/src/tasks/regression.jl
@@ -8,8 +8,10 @@ function TSRegression(blocks::Tuple{<:TimeSeriesRow, <:Continuous}, data)
     return SupervisedTask(
         blocks,
         (
+            ContinuousPreprocessing(),
             setup(TSPreprocessing, blocks[1], data[1].table),
         ),
+        ŷblock = blocks[2]
     )
 end
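
A regression task built this way plugs into the usual supervised-task helpers; a sketch, assuming `data` and `blocks` from the recipe example above:

```julia
using FastAI

task = TSRegression(blocks, data)
traindl, validdl = taskdataloaders(data, task)   # batches pass through both encodings
```
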