Skip to content

Commit 5a6d021

Browse files
authored
Merge pull request #256 from codeboy5/inceptiontime
InceptionTime Model for Time Series
2 parents 3d70b50 + 45160f6 commit 5a6d021

File tree

13 files changed

+1021
-54
lines changed

13 files changed

+1021
-54
lines changed

FastTimeSeries/Project.toml

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ authors = ["FluxML Community"]
44
version = "0.1.0"
55

66
[deps]
7+
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
78
DataDeps = "124859b0-ceae-595e-8997-d05f6a7a8dfe"
89
FastAI = "5d0beca9-ade8-49ae-ad0b-a3cf890e669f"
910
FilePathsBase = "48062228-2e41-5def-b9a4-89aafe57970f"
@@ -13,7 +14,6 @@ MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
1314
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
1415
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
1516
UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
16-
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
1717

1818
[compat]
1919
DataDeps = "0.7"
@@ -23,5 +23,4 @@ Flux = "0.12, 0.13"
2323
InlineTest = "0.2"
2424
MLUtils = "0.2"
2525
UnicodePlots = "2, 3"
26-
julia = "1.6"
27-
Zygote = "0.6"
26+
julia = "1.6"

FastTimeSeries/src/FastTimeSeries.jl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,14 @@ using FilePathsBase
2727
using InlineTest
2828
using Statistics
2929
using UnicodePlots
30+
using Flux
3031

3132
# Blocks
3233
include("blocks/timeseriesrow.jl")
3334

3435
# Encodings
3536
include("encodings/tspreprocessing.jl")
37+
include("encodings/continuouspreprocessing.jl")
3638

3739
# Models
3840
include("models/Models.jl")

FastTimeSeries/src/container.jl

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,3 @@
1-
#= TODO: loadfile
2-
3-
elseif endswith(file, ".ts")
4-
return _ts2df(file)
5-
6-
=#
7-
81
Datasets.loadfile(file::String, ::Val{:ts}) = _ts2df(file)
92

103
#TimeSeriesDataset
Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
"""
    ContinuousPreprocessing(numlabels = 1)

Encoding for continuous targets: `encode` wraps an observation into a
1-element vector (so models receive an array), and `decode` unwraps it again.
`numlabels` is the number of continuous labels reported by `decodedblock`.
"""
struct ContinuousPreprocessing <: Encoding
    numlabels::Int
end

# Default to a single continuous label.
ContinuousPreprocessing() = ContinuousPreprocessing(1)

decodedblock(c::ContinuousPreprocessing, block::AbstractArray) = Continuous(c.numlabels)

# Wrap the observation so downstream code always sees an array target.
encode(::ContinuousPreprocessing, _, block::Continuous, obs) = [obs]

# Inverse of `encode`: pull the value back out of the 1-element vector.
decode(::ContinuousPreprocessing, _, block::AbstractArray, obs) = obs[1]

FastTimeSeries/src/models.jl

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,9 @@ Construct a model for time-series classification.
77
function blockmodel(inblock::TimeSeriesRow,
88
outblock::OneHotTensor{0},
99
backbone)
10-
data = rand(Float32, inblock.nfeatures, 32, inblock.obslength)
11-
# data = [rand(Float32, inblock.nfeatures, 32) for _ ∈ 1:inblock.obslength]
10+
data = zeros(Float32, inblock.nfeatures, 1, 1)
1211
output = backbone(data)
12+
Flux.reset!(backbone)
1313
return Models.RNNModel(backbone, outsize = length(outblock.classes), recout = size(output, 1))
1414
end
1515

@@ -24,4 +24,6 @@ end
2424

2525
# ## Tests
2626

27-
@testset "blockbackbone" begin @test_nowarn FastAI.blockbackbone(TimeSeriesRow(1,140)) end
27+
@testset "blockbackbone" begin
28+
@test_nowarn FastAI.blockbackbone(TimeSeriesRow(1,140))
29+
end
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
"""
2+
InceptionModule(ni::Int, nf::Int, ks::Int = 40, bottleneck::Bool = true)
3+
4+
An InceptionModule consists of an (optional) bottleneck, followed by
5+
3 conv1d layers.
6+
"""
7+
function InceptionModule(ni::Int, nf::Int, kernel_size::Int = 40, bottleneck::Bool = true)
8+
ks = [kernel_size ÷ (2^i) for i in 0:2]
9+
ks = [ks[i] % 2 == 0 ? ks[i] - 1 : ks[i] for i in 1:3] # ensure odd ks
10+
bottleneck = ni > 1 ? bottleneck : false
11+
12+
bottleneck_block = bottleneck ? Conv1d(ni, nf, 1, bias = false) : identity
13+
14+
convs_layers =
15+
[Conv1d(bottleneck ? nf : ni, nf, ks[i], bias = false) for i in 1:3]
16+
17+
convs = Chain(bottleneck_block, Parallel(hcat, convs_layers...))
18+
19+
maxconvpool = Chain(MaxPool((3,), pad = 1, stride = 1), Conv1d(ni, nf, 1, bias = false))
20+
21+
return Chain(Parallel(hcat, convs, maxconvpool), BatchNorm(nf * 4, relu))
22+
end
23+
24+
"""
25+
InceptionBlock(ni::Int, nf::Int = 32, residual::Bool = true, depth::Int = 6)
26+
27+
An InceptionBlock consists of variable number of InceptionModule depending on the depth.
28+
Optionally residual.
29+
"""
30+
function InceptionBlock(ni::Int, nf::Int = 32, residual::Bool = true, depth::Int = 6)
31+
inception = []
32+
shortcut = []
33+
34+
for d in 1:depth
35+
push!(inception, InceptionModule(d == 1 ? ni : nf * 4, nf))
36+
if residual && d % 3 == 0
37+
n_in = d == 3 ? ni : nf * 4
38+
n_out = nf * 4
39+
skip =
40+
n_in == n_out ? BatchNorm(n_out) :
41+
Chain(Conv1d(n_in, n_out, 1), BatchNorm(n_out))
42+
push!(shortcut, skip)
43+
end
44+
end
45+
46+
blocks = []
47+
d = 1
48+
49+
while d <= depth
50+
blk = []
51+
while d <= depth
52+
push!(blk, inception[d])
53+
if d % 3 == 0
54+
d += 1
55+
break
56+
end
57+
d += 1
58+
end
59+
if residual && d ÷ 3 <= length(shortcut)
60+
skp = shortcut[d÷3]
61+
push!(blocks, Parallel(+, Chain(blk...), skp))
62+
else
63+
push!(blocks, Chain(blk...))
64+
end
65+
end
66+
return Chain(blocks...)
67+
end
68+
69+
"""
    changedims(X)

Swap the first two dimensions of a 3-d array, leaving the third in place.
"""
function changedims(X)
    return permutedims(X, (2, 1, 3))
end
70+
71+
"""
72+
InceptionTime(c_in::Int, c_out::Int, seq_len = nothing, nf::Int = 32)
73+
74+
A Julia Implemention of the InceptionTime model.
75+
From https://arxiv.org/abs/1909.04939
76+
77+
## Arguments.
78+
79+
- `c_in` : The number of input channels.
80+
- `c_out`: The number of output classes.
81+
- `nf` : The number of "hidden channels" to use.
82+
"""
83+
function InceptionTime(c_in::Int, c_out::Int, nf::Int = 32)
84+
inceptionblock = InceptionBlock(c_in, nf)
85+
gap = GAP1d(1)
86+
fc = Dense(nf * 4, c_out)
87+
return Chain(changedims, inceptionblock, gap, fc)
88+
end

FastTimeSeries/src/models/Models.jl

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,13 @@ using Flux
66
using Zygote
77
using DataDeps
88
using InlineTest
9+
using ChainRulesCore
910

10-
include("StackedLSTM.jl")
11+
# include("StackedLSTM.jl")
12+
include("layers.jl")
1113
include("RNN.jl")
14+
include("InceptionTime.jl")
1215

13-
export StackedLSTM, RNNModel
16+
export StackedLSTM, RNNModel, InceptionTime
1417

1518
end

FastTimeSeries/src/models/RNN.jl

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,4 @@
1-
function tabular2rnn(X::AbstractArray{Float32, 3})
2-
X = permutedims(X, (1, 3, 2))
3-
return X
4-
end
1+
tabular2rnn(X::AbstractArray{<:AbstractFloat, 3}) = permutedims(X, (1, 3, 2))
52

63
struct RNNModel{A, B}
74
recbackbone::A
@@ -22,16 +19,15 @@ is passed through a dropout layer before a 'finalclassifier' block.
2219
- `dropout_rate`: Dropout probability for the dropout layer.
2320
"""
2421

25-
function RNNModel(recbackbone;
26-
outsize,
27-
recout,
28-
kwargs...)
22+
function RNNModel(recbackbone; outsize, recout)
2923
return RNNModel(recbackbone, Dense(recout, outsize))
3024
end
3125

3226
function (m::RNNModel)(X)
3327
X = tabular2rnn(X)
34-
Flux.reset!(m.recbackbone)
28+
ChainRulesCore.ignore_derivatives() do
29+
Flux.reset!(m.recbackbone)
30+
end
3531
X = m.recbackbone(X)[:, :, end]
3632
return m.finalclassifier(X)
3733
end

FastTimeSeries/src/models/StackedLSTM.jl

Lines changed: 0 additions & 26 deletions
This file was deleted.

FastTimeSeries/src/models/layers.jl

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,4 +6,40 @@ Create a Global Adaptive Pooling + Flatten layer.
66
function GAP1d(output_size::Int)
    # Adaptive mean pooling down to `output_size` positions, then flatten
    # the remaining dimensions for a dense head.
    pooling = AdaptiveMeanPool((output_size,))
    return Chain(pooling, Flux.flatten)
end
10+
11+
"""
12+
StackedLSTM(in, out, hiddensize, layers)
13+
14+
Stacked LSTM network. Feeds the data through a chain of LSTM layers, where the hidden state
15+
of the previous layer gets fed to the next one. The first layer corresponds to
16+
`LSTM(in, hiddensize)`, the hidden layers to `LSTM(hiddensize, hiddensize)`, and the final
17+
layer to `LSTM(hiddensize, out)`. Takes the keyword argument `init` for the initialization
18+
of the layers.
19+
20+
"""
21+
function StackedLSTM(c_in::Int, c_out::Integer, hiddensize::Integer, layers::Integer;
22+
init=Flux.glorot_uniform)
23+
if layers == 1
24+
return Chain(LSTM(c_in, c_out; init=init))
25+
elseif layers == 2
26+
return Chain(LSTM(c_in, hiddensize; init=init),
27+
LSTM(hiddensize, c_out; init=init))
28+
else
29+
chain_vec = [LSTM(c_in, hiddensize; init=init)]
30+
for i = 1:layers - 2
31+
push!(chain_vec, LSTM(hiddensize, hiddensize; init=init))
32+
end
33+
return Chain(chain_vec..., LSTM(hiddensize, c_out; init=init))
34+
end
35+
end
36+
"""
    Conv1d(ni, nf, ks; stride = 1, dilation = 1, padding = ks ÷ 2 * dilation, bias = true)

A 1-d convolution mapping `ni` to `nf` channels with kernel width `ks`. The default
padding preserves the input length when `ks` is odd ("same" padding).
"""
function Conv1d(ni, nf, ks; stride = 1, dilation = 1, padding = ks ÷ 2 * dilation,
                bias = true)
    # NOTE(review): the previous version accepted `bias` and `dilation` keywords but
    # hard-coded `bias = false` and never forwarded `dilation` to `Conv`, so neither
    # had any effect; they are honored now. All in-file callers pass `bias = false`
    # explicitly and use the default dilation, so their behavior is unchanged.
    return Conv(
        (ks,),
        ni => nf;
        stride = stride,
        pad = padding,
        dilation = dilation,
        bias = bias,
    )
end

0 commit comments

Comments
 (0)