From fe330bc39541ae8aa3d06167d6d82189c47c0592 Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Sat, 17 Oct 2015 15:55:34 -0400 Subject: [PATCH 1/7] Refactor StateSpaceModel type into seperate file (for future extension) and fix deprecation warnings --- src/Kalman.jl | 49 ------------------------------------------ src/TimeModels.jl | 1 + src/statespacemodel.jl | 49 ++++++++++++++++++++++++++++++++++++++++++ test/kalman_filter.jl | 28 ++++++++++++------------ 4 files changed, 64 insertions(+), 63 deletions(-) create mode 100644 src/statespacemodel.jl diff --git a/src/Kalman.jl b/src/Kalman.jl index d0f9333..15bb7c2 100644 --- a/src/Kalman.jl +++ b/src/Kalman.jl @@ -1,52 +1,3 @@ -issquare(x::Matrix) = size(x, 1) == size(x, 2) ? true : false - -@compat type StateSpaceModel{T} - # Process transition and noise covariance - F::Union{Matrix{T}, Matrix{Function}} - V::Matrix{T} - # Observation and noise covariance - G::Union{Matrix{T}, Matrix{Function}} - W::Matrix{T} - # Inital guesses at state and error covariance - x0::Vector{T} - P0::Matrix{T} - - @compat function StateSpaceModel(F::Union{Matrix{T}, Matrix{Function}}, V::Matrix{T}, - G::Union{Matrix{T}, Matrix{Function}}, W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) - @assert issquare(F) - @assert size(F, 1) == length(x0) - @assert issym(V) - @assert size(V) == size(F) - @assert eigmin(V) >= 0 - @assert size(G, 1) == size(W, 1) - @assert size(G, 2) == length(x0) - @assert issym(W) - @assert eigmin(W) >= 0 - @assert size(P0, 1) == length(x0) - @assert issym(P0) - @assert eigmin(P0) >= 0 - new(F, V, G, W, x0, P0) - end -end - -@compat function StateSpaceModel{T <: Real}(F::Union{Matrix{T}, Matrix{Function}}, V::Matrix{T}, G::Union{Matrix{T}, Matrix{Function}}, - W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) - StateSpaceModel{T}(F, V, G, W, x0, P0) -end - -function show{T}(io::IO, mod::StateSpaceModel{T}) - dx, dy = length(mod.x0), size(mod.G, 1) - println("StateSpaceModel{$T}, $dx-D process x $dy-D observations") - println("Process evolution matrix F:") - show(mod.F) - println("\n\nProcess error covariance V:") - show(mod.V) - println("\n\nObservation matrix G:") - show(mod.G) - println("\n\nObseration error covariance W:") - show(mod.W) -end - type KalmanFiltered{T} filtered::Array{T} predicted::Array{T} diff --git a/src/TimeModels.jl b/src/TimeModels.jl index f9ee7a9..d8ff069 100644 --- a/src/TimeModels.jl +++ b/src/TimeModels.jl @@ -33,6 +33,7 @@ export # diagnostic tests exports jbtest +include("statespacemodel.jl") include("Kalman.jl") include("ARIMA.jl") include("GARCH.jl") diff --git a/src/statespacemodel.jl b/src/statespacemodel.jl new file mode 100644 index 0000000..38b9f5f --- /dev/null +++ b/src/statespacemodel.jl @@ -0,0 +1,49 @@ +issquare(x::Matrix) = size(x, 1) == size(x, 2) ? 
true : false + +@compat type StateSpaceModel{T} + # Process transition and noise covariance + F::Union{Matrix{T}, Matrix{Function}} + V::Matrix{T} + # Observation and noise covariance + G::Union{Matrix{T}, Matrix{Function}} + W::Matrix{T} + # Inital guesses at state and error covariance + x0::Vector{T} + P0::Matrix{T} + + @compat function StateSpaceModel(F::Union{Matrix{T}, Matrix{Function}}, V::Matrix{T}, + G::Union{Matrix{T}, Matrix{Function}}, W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) + @assert issquare(F) + @assert size(F, 1) == length(x0) + @assert issym(V) + @assert size(V) == size(F) + @assert eigmin(V) >= 0 + @assert size(G, 1) == size(W, 1) + @assert size(G, 2) == length(x0) + @assert issym(W) + @assert eigmin(W) >= 0 + @assert size(P0, 1) == length(x0) + @assert issym(P0) + @assert eigmin(P0) >= 0 + new(F, V, G, W, x0, P0) + end +end + +@compat function StateSpaceModel{T <: Real}(F::Union{Matrix{T}, Matrix{Function}}, V::Matrix{T}, G::Union{Matrix{T}, Matrix{Function}}, + W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) + StateSpaceModel{T}(F, V, G, W, x0, P0) +end + +function show{T}(io::IO, mod::StateSpaceModel{T}) + dx, dy = length(mod.x0), size(mod.G, 1) + println("StateSpaceModel{$T}, $dx-D process x $dy-D observations") + println("Process evolution matrix F:") + show(mod.F) + println("\n\nProcess error covariance V:") + show(mod.V) + println("\n\nObservation matrix G:") + show(mod.G) + println("\n\nObseration error covariance W:") + show(mod.W) +end + diff --git a/test/kalman_filter.jl b/test/kalman_filter.jl index ea90d3b..52b64fe 100644 --- a/test/kalman_filter.jl +++ b/test/kalman_filter.jl @@ -23,7 +23,7 @@ function build(theta) end # Time varying model -function sinusoid_model(omega::Real; fs::Int=256, x0=[0.5, -0.5], W::FloatingPoint=0.1) +function sinusoid_model(omega::Real; fs::Int=256, x0=[0.5, -0.5], W::AbstractFloat=0.1) F = [1.0 0; 0 1.0] V = diagm([1e-10, 1e-10]) function G1(n); cos(2*pi*omega*(1/fs)*n); end @@ -64,7 +64,7 @@ facts("Kalman Filter") do filt = kalman_filter(y, mod1) smooth2 = kalman_smooth(filt) - @fact smooth.smoothed => smooth2.smoothed + @fact smooth.smoothed --> smooth2.smoothed end context("Model fitting") do @@ -80,8 +80,8 @@ facts("Kalman Filter") do y[100] = NaN filt = kalman_filter(y, mod1) smooth = kalman_smooth(y, mod1) - @fact any(isnan(filt.filtered)) => false - @fact any(isnan(smooth.filtered)) => false + @fact any(isnan(filt.filtered)) --> false + @fact any(isnan(smooth.filtered)) --> false end context("Return sizes") do @@ -90,10 +90,10 @@ facts("Kalman Filter") do filt = kalman_filter(y, mod1) smooth = kalman_smooth(y, mod1) - @fact size(filt.filtered) => size(filt.predicted) - @fact size(filt.filtered) => size(smooth.filtered) - @fact size(filt.filtered) => size(smooth.smoothed) - @fact size(filt.error_cov) => size(smooth.error_cov) + @fact size(filt.filtered) --> size(filt.predicted) + @fact size(filt.filtered) --> size(smooth.filtered) + @fact size(filt.filtered) --> size(smooth.smoothed) + @fact size(filt.error_cov) --> size(smooth.error_cov) end end @@ -121,7 +121,7 @@ facts("Kalman Filter") do context("Correct initial guess") do filt = kalman_filter(y, mod2) - @fact filt.predicted[end, :] => roughly([0.5 -0.5]; atol= 0.3) + @fact filt.predicted[end, :] --> roughly([0.5 -0.5]; atol= 0.3) #= x_est = round(filt.predicted[end, :], 3) =# #= display(lineplot(collect(1:size(x, 1)) / fs, vec(filt.predicted[:, 1]), width = 120, title="Filtered State 1: $(x_est[1])")) =# @@ -131,14 +131,14 @@ facts("Kalman Filter") do context("Incorrect 
initial guess") do mod3 = sinusoid_model(4, fs = 256, x0=[1.7, -0.2]) filt = kalman_filter(y, mod3) - @fact filt.predicted[end, :] => roughly([0.5 -0.5]; atol= 0.3) + @fact filt.predicted[end, :] --> roughly([0.5 -0.5]; atol= 0.3) end context("Model error") do mod4 = sinusoid_model(4, fs = 256, x0=[1.7, -0.2], W=3.0) filt = kalman_filter(y, mod4) - @fact filt.predicted[end, :] => roughly([0.5 -0.5]; atol= 0.3) + @fact filt.predicted[end, :] --> roughly([0.5 -0.5]; atol= 0.3) end end @@ -149,7 +149,7 @@ facts("Kalman Filter") do mod2 = sinusoid_model(4, fs = 8192) x, y = TimeModels.simulate(mod2, fs*10) smooth = kalman_smooth(y, sinusoid_model(4, fs = 8192, x0=[1.7, -0.2]) ) - @fact mean(smooth.smoothed, 1) => roughly([0.5 -0.5]; atol= 0.1) + @fact mean(smooth.smoothed, 1) --> roughly([0.5 -0.5]; atol= 0.1) #= x_est = round(smooth.smoothed[end, :], 3) =# #= display(lineplot(collect(1:size(x, 1)) / fs, vec(smooth.smoothed[1:end, 1]), width = 120, title="Smoothed State 1: $(x_est[1])")) =# @@ -166,9 +166,9 @@ facts("Kalman Filter") do y_noisy = y_true + s*randn(length(t)) lm = StateSpaceModel([1 dt; 0 1], zeros(2,2), [1. 0], s*eye(1), zeros(2), 100*eye(2)) lm_filt = kalman_filter(y_noisy, lm) - @fact lm_filt.filtered[end,1] => roughly(y_true[end], atol=4*sqrt(lm_filt.error_cov[1,1,end])) + @fact lm_filt.filtered[end,1] --> roughly(y_true[end], atol=4*sqrt(lm_filt.error_cov[1,1,end])) lm_smooth = kalman_smooth(y_noisy, lm) - @fact all((y_true - lm_smooth.smoothed[:,1]) .< 4*sqrt(lm_smooth.error_cov[1,1,:][:])) => true + @fact all((y_true - lm_smooth.smoothed[:,1]) .< 4*sqrt(lm_smooth.error_cov[1,1,:][:])) --> true end From 26b866950a4885128cd2b39780745366626f1e69 Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Sat, 17 Oct 2015 17:05:03 -0400 Subject: [PATCH 2/7] Switched F and G to generator functions, removed 0.3 support, and cleaned up dependencies --- REQUIRE | 5 +---- src/Kalman.jl | 37 +++++++++++++++---------------------- src/TimeModels.jl | 9 +-------- src/statespacemodel.jl | 29 ++++++++++++++++++----------- test/kalman_filter.jl | 12 +++++------- 5 files changed, 40 insertions(+), 52 deletions(-) diff --git a/REQUIRE b/REQUIRE index 60b7aee..8620ca2 100644 --- a/REQUIRE +++ b/REQUIRE @@ -1,8 +1,5 @@ -julia 0.3 0.5- -Compat +julia 0.4 0.5- Distributions StatsBase -Dates TimeSeries 0.5.11 -NLopt Optim diff --git a/src/Kalman.jl b/src/Kalman.jl index 15bb7c2..8c6c2fc 100644 --- a/src/Kalman.jl +++ b/src/Kalman.jl @@ -10,7 +10,7 @@ end function show{T}(io::IO, filt::KalmanFiltered{T}) n = size(filt.y, 1) - dx, dy = length(filt.model.x0), size(filt.model.G, 1) + dx, dy = length(filt.model.x0), size(filt.model.G(1), 1) println("KalmanFiltered{$T}") println("$n observations, $dx-D process x $dy-D observations") println("Negative log-likelihood: $(filt.loglik)") @@ -28,19 +28,12 @@ end function show{T}(io::IO, filt::KalmanSmoothed{T}) n = size(filt.y, 1) - dx, dy = length(filt.model.x0), size(filt.model.G, 1) + dx, dy = length(filt.model.x0), size(filt.model.G(1), 1) println("KalmanSmoothed{$T}") println("$n observations, $dx-D process x $dy-D observations") println("Negative log-likelihood: $(filt.loglik)") end -function eval_matrix(M::Matrix{Function}, i::Int, ArrayType::Type) - reshape(convert(Array{ArrayType}, [m(i) for m in M]), size(M)) -end -function eval_matrix{T <: Real}(M::Matrix{T}, n::Int, ArrayType::Type) - M -end - function simulate{T}(model::StateSpaceModel{T}, n::Int) # Generates a realization of a state space model. 
# @@ -52,7 +45,7 @@ function simulate{T}(model::StateSpaceModel{T}, n::Int) # dimensions of the process and observation series nx = length(model.x0) - ny = size(model.G, 1) + ny = size(model.G(1), 1) # create empty arrays to hold the state and observed series x = zeros(nx, n) @@ -60,22 +53,22 @@ function simulate{T}(model::StateSpaceModel{T}, n::Int) # Cholesky decompositions of the covariance matrices, for generating # random noise - V_chol = @compat chol(model.V, Val{:L}) - W_chol = @compat chol(model.W, Val{:L}) + V_chol = chol(model.V, Val{:L}) + W_chol = chol(model.W, Val{:L}) # Generate the series - x[:, 1] = eval_matrix(model.F, 1, T) * model.x0 + V_chol * randn(nx) - y[:, 1] = eval_matrix(model.G, 1, T) * x[:, 1] + W_chol * randn(ny) + x[:, 1] = model.F(1) * model.x0 + V_chol * randn(nx) + y[:, 1] = model.G(1) * x[:, 1] + W_chol * randn(ny) for i=2:n - x[:, i] = eval_matrix(model.F, i, T) * x[:, i-1] + V_chol * randn(nx) - y[:, i] = eval_matrix(model.G, i, T) * x[:, i] + W_chol * randn(ny) + x[:, i] = model.F(i) * x[:, i-1] + V_chol * randn(nx) + y[:, i] = model.G(i) * x[:, i] + W_chol * randn(ny) end return x', y' end function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}) - @assert size(y,2) == size(model.G,1) + @assert size(y,2) == size(model.G(1),1) function kalman_recursions(y_i, G_i, W, x_pred_i, P_pred_i) if !any(isnan(y_i)) @@ -103,18 +96,18 @@ function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}) log_likelihood = n*ny*log(2pi)/2 # first iteration - F_1 = eval_matrix(model.F, 1, T) + F_1 = model.F(1) x_pred[:, 1] = F_1 * model.x0 P_pred[:, :, 1] = F_1 * model.P0 * F_1' + model.V - x_filt[:, 1], P_filt[:,:,1], dll = kalman_recursions(y[:, 1], eval_matrix(model.G, 1, T), + x_filt[:, 1], P_filt[:,:,1], dll = kalman_recursions(y[:, 1], model.G(1), model.W, x_pred[:,1], P_pred[:,:,1]) log_likelihood += dll for i=2:n - F_i = eval_matrix(model.F, i, T) + F_i = model.F(i) x_pred[:, i] = F_i * x_filt[:, i-1] P_pred[:, :, i] = F_i * P_filt[:, :, i-1] * F_i' + model.V - x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], eval_matrix(model.G, i, T), + x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], model.G(i), model.W, x_pred[:,i], P_pred[:,:,i]) log_likelihood += dll end @@ -135,7 +128,7 @@ function kalman_smooth{T}(y::Array{T}, model::StateSpaceModel{T}; filt::KalmanFi x_smooth[:, n] = x_filt[:, n] P_smoov[:, :, n] = P_filt[:, :, n] for i = (n-1):-1:1 - J = P_filt[:, :, i] * eval_matrix(model.F, i, T)' * inv(P_pred[:,:,i+1]) + J = P_filt[:, :, i] * model.F(i)' * inv(P_pred[:,:,i+1]) x_smooth[:, i] = x_filt[:, i] + J * (x_smooth[:, i+1] - x_pred[:, i+1]) P_smoov[:, :, i] = P_filt[:, :, i] + J * (P_smoov[:, :, i+1] - P_pred[:,:,i+1]) * J' end diff --git a/src/TimeModels.jl b/src/TimeModels.jl index d8ff069..39fdc81 100644 --- a/src/TimeModels.jl +++ b/src/TimeModels.jl @@ -1,16 +1,9 @@ module TimeModels -if VERSION < v"0.4-" - using Dates - else - using Base.Dates -end - -using Compat +using Base.Dates using Distributions using StatsBase using TimeSeries -using NLopt using Optim import Base: show diff --git a/src/statespacemodel.jl b/src/statespacemodel.jl index 38b9f5f..e1af903 100644 --- a/src/statespacemodel.jl +++ b/src/statespacemodel.jl @@ -1,25 +1,27 @@ issquare(x::Matrix) = size(x, 1) == size(x, 2) ? 
true : false -@compat type StateSpaceModel{T} +immutable StateSpaceModel{T} # Process transition and noise covariance - F::Union{Matrix{T}, Matrix{Function}} + F::Function V::Matrix{T} # Observation and noise covariance - G::Union{Matrix{T}, Matrix{Function}} + G::Function W::Matrix{T} # Inital guesses at state and error covariance x0::Vector{T} P0::Matrix{T} - @compat function StateSpaceModel(F::Union{Matrix{T}, Matrix{Function}}, V::Matrix{T}, - G::Union{Matrix{T}, Matrix{Function}}, W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) - @assert issquare(F) - @assert size(F, 1) == length(x0) + function StateSpaceModel(F::Function, V::Matrix{T}, + G::Function, W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) + F1 = F(1) + G1 = G(1) + @assert issquare(F1) + @assert size(F1, 1) == length(x0) @assert issym(V) - @assert size(V) == size(F) + @assert size(V) == size(F1) @assert eigmin(V) >= 0 - @assert size(G, 1) == size(W, 1) - @assert size(G, 2) == length(x0) + @assert size(G1, 1) == size(W, 1) + @assert size(G1, 2) == length(x0) @assert issym(W) @assert eigmin(W) >= 0 @assert size(P0, 1) == length(x0) @@ -29,11 +31,16 @@ issquare(x::Matrix) = size(x, 1) == size(x, 2) ? true : false end end -@compat function StateSpaceModel{T <: Real}(F::Union{Matrix{T}, Matrix{Function}}, V::Matrix{T}, G::Union{Matrix{T}, Matrix{Function}}, +function StateSpaceModel{T <: Real}(F::Function, V::Matrix{T}, G::Function, W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) StateSpaceModel{T}(F, V, G, W, x0, P0) end +function StateSpaceModel{T <: Real}(F::Matrix{T}, V::Matrix{T}, G::Matrix{T}, + W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) + StateSpaceModel{T}(_->F, V, _->G, W, x0, P0) +end + function show{T}(io::IO, mod::StateSpaceModel{T}) dx, dy = length(mod.x0), size(mod.G, 1) println("StateSpaceModel{$T}, $dx-D process x $dy-D observations") diff --git a/test/kalman_filter.jl b/test/kalman_filter.jl index 52b64fe..f4e872a 100644 --- a/test/kalman_filter.jl +++ b/test/kalman_filter.jl @@ -24,13 +24,11 @@ end # Time varying model function sinusoid_model(omega::Real; fs::Int=256, x0=[0.5, -0.5], W::AbstractFloat=0.1) - F = [1.0 0; 0 1.0] - V = diagm([1e-10, 1e-10]) - function G1(n); cos(2*pi*omega*(1/fs)*n); end - function G2(n); -sin(2*pi*omega*(1/fs)*n); end - G = reshape([G1, G2], 1, 2) - W = diagm([W]) - P0 = diagm([1e-1, 1e-1]) + F(n) = [1.0 0; 0 1.0] + V = diagm([1e-10, 1e-10]) + G(n) = [cos(2*pi*omega*(1/fs)*n) -sin(2*pi*omega*(1/fs)*n)] + W = diagm([W]) + P0 = diagm([1e-1, 1e-1]) StateSpaceModel(F, V, G, W, x0, P0) end From a0f21aa83458f5c03da5aa632b6feaaa658aa53a Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Sat, 17 Oct 2015 18:11:40 -0400 Subject: [PATCH 3/7] Added exogenous system inputs + tests, and reorganized files (for later extension) --- .travis.yml | 5 +- README.md | 9 +- src/Kalman.jl | 148 ----------------------------- src/TimeModels.jl | 9 +- src/kalman_filter.jl | 71 ++++++++++++++ src/kalman_smooth.jl | 43 +++++++++ src/parameter_estimation.jl | 5 + src/statespacemodel.jl | 179 ++++++++++++++++++++++++++---------- test/kalman_filter.jl | 37 +++++--- 9 files changed, 286 insertions(+), 220 deletions(-) delete mode 100644 src/Kalman.jl create mode 100644 src/kalman_filter.jl create mode 100644 src/kalman_smooth.jl create mode 100644 src/parameter_estimation.jl diff --git a/.travis.yml b/.travis.yml index 8180b5e..2bc88d6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,11 +2,12 @@ language: julia os: - linux julia: - - 0.3 - 0.4 notifications: email: false sudo: false script: - if [[ -a .git/shallow ]]; then 
git fetch --unshallow; fi - - julia -e 'Pkg.clone(pwd()); Pkg.build("TimeModels"); Pkg.test("TimeModels")' + - julia -e 'Pkg.clone(pwd()); Pkg.build("TimeModels"); Pkg.test("TimeModels", coverage=true)' +after_success: + - julia -e 'cd(Pkg.dir("TimeModels")); Pkg.add("Coverage"); using Coverage; Coveralls.submit(process_folder())' diff --git a/README.md b/README.md index b6ece6f..9d84605 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,8 @@ TimeModels.jl ============ [![Build Status](https://travis-ci.org/JuliaStats/TimeModels.jl.svg?branch=master)](https://travis-ci.org/JuliaStats/TimeModels.jl) - -[![TimeModels](http://pkg.julialang.org/badges/TimeModels_0.3.svg)](http://pkg.julialang.org/?pkg=TimeModels&ver=0.3) -[![TimeModels](http://pkg.julialang.org/badges/TimeModels_0.4.svg)](http://pkg.julialang.org/?pkg=TimeModels&ver=0.4) - - +[![TimeModels](http://pkg.julialang.org/badges/TimeModels_0.4.svg)](http://pkg.julialang.org/?pkg=TimeModels&ver=0.4) ##A Julia package for modeling time series. @@ -40,7 +35,7 @@ estimates parameters of univariate normal GARCH process. #### arguments: data - data vector #### returns: -Structure containing details of the GARCH fit with the fllowing fields: +Structure containing details of the GARCH fit with the following fields: * data - orginal data * params - vector of model parameters (omega,alpha,beta) diff --git a/src/Kalman.jl b/src/Kalman.jl deleted file mode 100644 index 8c6c2fc..0000000 --- a/src/Kalman.jl +++ /dev/null @@ -1,148 +0,0 @@ -type KalmanFiltered{T} - filtered::Array{T} - predicted::Array{T} - error_cov::Array{T} - pred_error_cov::Array{T} - model::StateSpaceModel - y::Array{T} - loglik::T -end - -function show{T}(io::IO, filt::KalmanFiltered{T}) - n = size(filt.y, 1) - dx, dy = length(filt.model.x0), size(filt.model.G(1), 1) - println("KalmanFiltered{$T}") - println("$n observations, $dx-D process x $dy-D observations") - println("Negative log-likelihood: $(filt.loglik)") -end - -type KalmanSmoothed{T} - predicted::Array{T} - filtered::Array{T} - smoothed::Array{T} - error_cov::Array{T} - model::StateSpaceModel - y::Array{T} - loglik::T -end - -function show{T}(io::IO, filt::KalmanSmoothed{T}) - n = size(filt.y, 1) - dx, dy = length(filt.model.x0), size(filt.model.G(1), 1) - println("KalmanSmoothed{$T}") - println("$n observations, $dx-D process x $dy-D observations") - println("Negative log-likelihood: $(filt.loglik)") -end - -function simulate{T}(model::StateSpaceModel{T}, n::Int) - # Generates a realization of a state space model. - # - # Arguments: - # model : StateSpaceModel - # Model defining the process - # n : Int - # Number of steps to simulate. 
- - # dimensions of the process and observation series - nx = length(model.x0) - ny = size(model.G(1), 1) - - # create empty arrays to hold the state and observed series - x = zeros(nx, n) - y = zeros(ny, n) - - # Cholesky decompositions of the covariance matrices, for generating - # random noise - V_chol = chol(model.V, Val{:L}) - W_chol = chol(model.W, Val{:L}) - - # Generate the series - x[:, 1] = model.F(1) * model.x0 + V_chol * randn(nx) - y[:, 1] = model.G(1) * x[:, 1] + W_chol * randn(ny) - for i=2:n - x[:, i] = model.F(i) * x[:, i-1] + V_chol * randn(nx) - y[:, i] = model.G(i) * x[:, i] + W_chol * randn(ny) - end - - return x', y' -end - -function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}) - @assert size(y,2) == size(model.G(1),1) - - function kalman_recursions(y_i, G_i, W, x_pred_i, P_pred_i) - if !any(isnan(y_i)) - innov = y_i - G_i * x_pred_i - S = G_i * P_pred_i * G_i' + W # Innovation covariance - K = P_pred_i * G_i' / S #* inv(S) # Kalman gain - x_filt_i = x_pred_i + K * innov - P_filt_i = (I - K * G_i) * P_pred_i - dll = (dot(innov,S\innov) + logdet(S))/2 - else - x_filt_i = x_pred_i - P_filt_i = P_pred_i - dll = 0 - end - return x_filt_i, P_filt_i, dll - end #kalman_recursions - - y = y' - ny = size(y,1) - n = size(y, 2) - x_pred = zeros(length(model.x0), n) - x_filt = zeros(x_pred) - P_pred = zeros(size(model.P0, 1), size(model.P0, 2), n) - P_filt = zeros(P_pred) - log_likelihood = n*ny*log(2pi)/2 - - # first iteration - F_1 = model.F(1) - x_pred[:, 1] = F_1 * model.x0 - P_pred[:, :, 1] = F_1 * model.P0 * F_1' + model.V - x_filt[:, 1], P_filt[:,:,1], dll = kalman_recursions(y[:, 1], model.G(1), - model.W, x_pred[:,1], P_pred[:,:,1]) - log_likelihood += dll - - for i=2:n - F_i = model.F(i) - x_pred[:, i] = F_i * x_filt[:, i-1] - P_pred[:, :, i] = F_i * P_filt[:, :, i-1] * F_i' + model.V - x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], model.G(i), - model.W, x_pred[:,i], P_pred[:,:,i]) - log_likelihood += dll - end - - return KalmanFiltered(x_filt', x_pred', P_filt, P_pred, model, y', log_likelihood) -end - - -function kalman_smooth{T}(y::Array{T}, model::StateSpaceModel{T}; filt::KalmanFiltered{T} = kalman_filter(y, model)) - n = size(y, 1) - x_pred = filt.predicted' - x_filt = filt.filtered' - x_smooth = zeros(size(x_filt)) - P_pred = filt.pred_error_cov - P_filt = filt.error_cov - P_smoov = zeros(P_filt) - - x_smooth[:, n] = x_filt[:, n] - P_smoov[:, :, n] = P_filt[:, :, n] - for i = (n-1):-1:1 - J = P_filt[:, :, i] * model.F(i)' * inv(P_pred[:,:,i+1]) - x_smooth[:, i] = x_filt[:, i] + J * (x_smooth[:, i+1] - x_pred[:, i+1]) - P_smoov[:, :, i] = P_filt[:, :, i] + J * (P_smoov[:, :, i+1] - P_pred[:,:,i+1]) * J' - end - - return KalmanSmoothed(x_pred', x_filt', x_smooth', P_smoov, model, y, filt.loglik) -end - -function kalman_smooth{T}(kfiltered::KalmanFiltered{T}) - kalman_smooth(kfiltered.y, kfiltered.model, filt = kfiltered) -end - - -function fit{T}(y::Matrix{T}, build::Function, theta0::Vector{T}) - objective(theta) = kalman_filter(y, build(theta)).loglik - kfit = Optim.optimize(objective, theta0) - return (kfit.minimum, build(kfit.minimum)) -end diff --git a/src/TimeModels.jl b/src/TimeModels.jl index 39fdc81..d6eaf12 100644 --- a/src/TimeModels.jl +++ b/src/TimeModels.jl @@ -26,10 +26,17 @@ export # diagnostic tests exports jbtest +# Core functionality include("statespacemodel.jl") -include("Kalman.jl") +include("kalman_filter.jl") +include("kalman_smooth.jl") +include("parameter_estimation.jl") + +# Model specifications 
include("ARIMA.jl") include("GARCH.jl") + +# Tests include("diagnostic_tests.jl") end diff --git a/src/kalman_filter.jl b/src/kalman_filter.jl new file mode 100644 index 0000000..55d3e2c --- /dev/null +++ b/src/kalman_filter.jl @@ -0,0 +1,71 @@ +type KalmanFiltered{T} + filtered::Array{T} + predicted::Array{T} + error_cov::Array{T} + pred_error_cov::Array{T} + model::StateSpaceModel + y::Array{T} + u::Array{T} + loglik::T +end + +function show{T}(io::IO, filt::KalmanFiltered{T}) + n = size(filt.y, 1) + dx, dy = filt.model.nx, filt.model.ny + println("KalmanFiltered{$T}") + println("$n observations, $dx-D process x $dy-D observations") + println("Negative log-likelihood: $(filt.loglik)") +end + +function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros(size(y,1), model.nu)) + + @assert size(u,1) == size(y,1) + @assert size(y,2) == model.ny + @assert size(u,2) == model.nu + + function kalman_recursions(y_i::Vector{T}, u_i::Vector{T}, G_i::Matrix{T}, + x_pred_i::Vector{T}, P_pred_i::Matrix{T}) + if !any(isnan(y_i)) + innov = y_i - G_i * x_pred_i - model.D * u_i + S = G_i * P_pred_i * G_i' + model.W # Innovation covariance + K = P_pred_i * G_i' / S # Kalman gain + x_filt_i = x_pred_i + K * innov + P_filt_i = (I - K * G_i) * P_pred_i + dll = (dot(innov,S\innov) + logdet(S))/2 + else + x_filt_i = x_pred_i + P_filt_i = P_pred_i + dll = 0 + end + return x_filt_i, P_filt_i, dll + end #kalman_recursions + + y = y' + u = u' + n = size(y, 2) + x_pred = zeros(model.nx, n) + x_filt = zeros(x_pred) + P_pred = zeros(model.nx, model.nx, n) + P_filt = zeros(P_pred) + log_likelihood = n*model.ny*log(2pi)/2 + + # first iteration + F_1 = model.F(1) + x_pred[:, 1] = model.x1 + P_pred[:, :, 1] = model.P1 + x_filt[:, 1], P_filt[:,:,1], dll = kalman_recursions(y[:, 1], u[:, 1], model.G(1), + model.x1, model.P1) + log_likelihood += dll + + for i=2:n + F_i1 = model.F(i) + x_pred[:, i] = F_i1 * x_filt[:, i-1] + model.B * u[:, i-1] + P_pred[:, :, i] = F_i1 * P_filt[:, :, i-1] * F_i1' + model.V + x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], u[:, i], model.G(i), + x_pred[:,i], P_pred[:,:,i]) + log_likelihood += dll + end + + return KalmanFiltered(x_filt', x_pred', P_filt, P_pred, model, y', u', log_likelihood) +end + diff --git a/src/kalman_smooth.jl b/src/kalman_smooth.jl new file mode 100644 index 0000000..c78436e --- /dev/null +++ b/src/kalman_smooth.jl @@ -0,0 +1,43 @@ +type KalmanSmoothed{T} + predicted::Array{T} + filtered::Array{T} + smoothed::Array{T} + error_cov::Array{T} + model::StateSpaceModel + y::Array{T} + u::Array{T} + loglik::T +end + +function show{T}(io::IO, smoothed::KalmanSmoothed{T}) + n = size(smoothed.y, 1) + dx, dy = smoothed.model.nx, smoothed.model.ny + println("KalmanSmoothed{$T}") + println("$n observations, $dx-D process x $dy-D observations") + println("Negative log-likelihood: $(smoothed.loglik)") +end + +function kalman_smooth(filt::KalmanFiltered) + + n = size(filt.y, 1) + model = filt.model + x_pred = filt.predicted' + x_filt = filt.filtered' + x_smooth = zeros(x_filt) + P_pred = filt.pred_error_cov + P_filt = filt.error_cov + P_smoov = zeros(P_filt) + + x_smooth[:, n] = x_filt[:, n] + P_smoov[:, :, n] = P_filt[:, :, n] + for i = (n-1):-1:1 + J = P_filt[:, :, i] * model.F(i)' * inv(P_pred[:,:,i+1]) + x_smooth[:, i] = x_filt[:, i] + J * (x_smooth[:, i+1] - x_pred[:, i+1]) + P_smoov[:, :, i] = P_filt[:, :, i] + J * (P_smoov[:, :, i+1] - P_pred[:,:,i+1]) * J' + end + + return KalmanSmoothed(x_pred', x_filt', x_smooth', P_smoov, model, filt.y, 
filt.u, filt.loglik) +end + +kalman_smooth(y::Array, model::StateSpaceModel; u=zeros(size(y,1), model.nu)) = + kalman_filter(y, model, u=u) |> kalman_smooth diff --git a/src/parameter_estimation.jl b/src/parameter_estimation.jl new file mode 100644 index 0000000..1c456b5 --- /dev/null +++ b/src/parameter_estimation.jl @@ -0,0 +1,5 @@ +function fit{T}(y::Matrix{T}, build::Function, theta0::Vector{T}) + objective(theta) = kalman_filter(y, build(theta)).loglik + kfit = Optim.optimize(objective, theta0) + return (kfit.minimum, build(kfit.minimum)) +end diff --git a/src/statespacemodel.jl b/src/statespacemodel.jl index e1af903..2f0a7d3 100644 --- a/src/statespacemodel.jl +++ b/src/statespacemodel.jl @@ -1,56 +1,141 @@ -issquare(x::Matrix) = size(x, 1) == size(x, 2) ? true : false - -immutable StateSpaceModel{T} - # Process transition and noise covariance - F::Function - V::Matrix{T} - # Observation and noise covariance - G::Function - W::Matrix{T} - # Inital guesses at state and error covariance - x0::Vector{T} - P0::Matrix{T} - - function StateSpaceModel(F::Function, V::Matrix{T}, - G::Function, W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) - F1 = F(1) - G1 = G(1) - @assert issquare(F1) - @assert size(F1, 1) == length(x0) - @assert issym(V) - @assert size(V) == size(F1) - @assert eigmin(V) >= 0 - @assert size(G1, 1) == size(W, 1) - @assert size(G1, 2) == length(x0) - @assert issym(W) - @assert eigmin(W) >= 0 - @assert size(P0, 1) == length(x0) - @assert issym(P0) - @assert eigmin(P0) >= 0 - new(F, V, G, W, x0, P0) - end +immutable StateSpaceModel{T <: Real} + + # Process transition matrix, control matrix, and noise covariance + F::Function + B::Matrix{T} + V::Matrix{T} + + # Observation matrix, feed-forward matrix, and noise covariance + G::Function + D::Matrix{T} + W::Matrix{T} + + # Inital state and error covariance conditions + x1::Vector{T} + P1::Matrix{T} + + # Model dimensions + nx::Int + ny::Int + nu::Int + + function StateSpaceModel(F::Function, B::Matrix{T}, V::Matrix{T}, + G::Function, D::Matrix{T}, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) + + ispossemidef(x::Matrix) = issym(x) && (eigmin(x) >= 0) + @assert ispossemidef(V) + @assert ispossemidef(W) + @assert ispossemidef(P1) + + nx, ny, nu = confirm_matrix_sizes(F(1), B, V, G(1), D, W, x1, P1) + new(F, B, V, G, D, W, x1, P1, nx, ny, nu) + end end -function StateSpaceModel{T <: Real}(F::Function, V::Matrix{T}, G::Function, - W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) - StateSpaceModel{T}(F, V, G, W, x0, P0) +# Time-dependent definitions +function StateSpaceModel{T}(F::Function, B::Matrix{T}, V::Matrix{T}, + G::Function, D::Matrix{T}, W::Matrix{T}, + x1::Vector{T}, P1::Matrix{T}) + StateSpaceModel{T}(F, B, V, G, D, W, x1, P1) end -function StateSpaceModel{T <: Real}(F::Matrix{T}, V::Matrix{T}, G::Matrix{T}, - W::Matrix{T}, x0::Vector{T}, P0::Matrix{T}) - StateSpaceModel{T}(_->F, V, _->G, W, x0, P0) +function StateSpaceModel{T}(F::Function, V::Matrix{T}, + G::Function, W::Matrix{T}, + x1::Vector{T}, P1::Matrix{T}) + B = zeros(size(V, 1), 1) + D = zeros(size(W, 1), 1) + StateSpaceModel{T}(F, B, V, G, D, W, x1, P1) +end + +# Time-independent definitions +function StateSpaceModel{T}(F::Matrix{T}, B::Matrix{T}, V::Matrix{T}, + G::Matrix{T}, D::Matrix{T}, W::Matrix{T}, + x1::Vector{T}, P1::Matrix{T}) + StateSpaceModel{T}(_->F, B, V, _->G, D, W, x1, P1) +end + +function StateSpaceModel{T}(F::Matrix{T}, V::Matrix{T}, + G::Matrix{T}, W::Matrix{T}, + x1::Vector{T}, P1::Matrix{T}) + B = zeros(size(V, 1), 1) + D = zeros(size(W, 1), 1) + 
StateSpaceModel{T}(_->F, B, V, _->G, D, W, x1, P1) end function show{T}(io::IO, mod::StateSpaceModel{T}) - dx, dy = length(mod.x0), size(mod.G, 1) - println("StateSpaceModel{$T}, $dx-D process x $dy-D observations") - println("Process evolution matrix F:") - show(mod.F) - println("\n\nProcess error covariance V:") - show(mod.V) - println("\n\nObservation matrix G:") - show(mod.G) - println("\n\nObseration error covariance W:") - show(mod.W) + dx, dy = length(mod.x1), size(mod.G, 1) + println("StateSpaceModel{$T}, $dx-D process x $dy-D observations") + println("Process evolution matrix F:") + show(mod.F) + if any(mod.B .!= 0) + println("\n\nControl input matrix B:") + show(mod.B) + end #if + println("\n\nProcess error covariance V:") + show(mod.V) + println("\n\nObservation matrix G:") + show(mod.G) + if any(mod.D .!= 0) + println("\n\nFeed-forward matrix D:") + show(mod.D) + end #if + println("\n\nObseration error covariance W:") + show(mod.W) +end + +function confirm_matrix_sizes(F, B, V, G, D, W, x1, P1) + + nx = size(F, 1) + nu = size(B, 2) + ny = size(G, 1) + + @assert size(F) == (nx, nx) + @assert size(B) == (nx, nu) + @assert size(V) == (nx, nx) + + @assert size(G) == (ny, nx) + @assert size(D) == (ny, nu) + @assert size(W) == (ny, ny) + + @assert length(x1) == nx + @assert size(P1) == (nx, nx) + + return nx, ny, nu + +end #confirm_matrix_sizes + +function simulate(model::StateSpaceModel, n::Int; u::Array=zeros(n, model.nu)) + # Generates a realization of a state space model. + # + # Arguments: + # model : StateSpaceModel + # Model defining the process + # n : Int + # Number of steps to simulate. + # u : Array (optional) + # n x nu array of exogenous inputs + + @assert size(u, 1) == n + @assert size(u, 2) == model.nu + + # create empty arrays to hold the state and observed series + x = zeros(model.nx, n) + y = zeros(model.ny, n) + u = u' + + # Cholesky decompositions of the covariance matrices, for generating + # random noise + V_chol = chol(model.V, Val{:L}) + W_chol = chol(model.W, Val{:L}) + + # Generate the series + x[:, 1] = model.x1 + y[:, 1] = model.G(1) * model.x1 + model.D * u[:, 1] + W_chol * randn(model.ny) + for i=2:n + x[:, i] = model.F(i-1) * x[:, i-1] + model.B * u[:, i-1] + V_chol * randn(model.nx) + y[:, i] = model.G(i) * x[:, i] + model.D * u[:, i] + W_chol * randn(model.ny) + end + + return x', y' end diff --git a/test/kalman_filter.jl b/test/kalman_filter.jl index f4e872a..479001a 100644 --- a/test/kalman_filter.jl +++ b/test/kalman_filter.jl @@ -93,6 +93,25 @@ facts("Kalman Filter") do @fact size(filt.filtered) --> size(smooth.smoothed) @fact size(filt.error_cov) --> size(smooth.error_cov) end + + context("Linear regression test") do + m, b, s, dt = 5, 2, 2, .1 + t = 0:dt:10 + y_true = m*t + b + input = 100*[sin(t/2) sin(t/4) cos(t/2) cos(t/4)] + 10 + y_noisy = [y_true zeros(length(t)) -y_true] + + 100*[sin(t/2)+sin(t/4) sin(t/2)+cos(t/2) cos(t/2)+cos(t/4)] + 10 + randn(length(t), 3) + lm = StateSpaceModel([1 dt; 0 1], zeros(2,4), zeros(2,2), + [1. 0; 0 0; -1 0], [1. 
1 0 0; 1 0 1 0; 0 0 1 1], s*eye(3), zeros(2), 100*eye(2)) + lm_filt = kalman_filter(y_noisy, lm, u=input) + @fact lm_filt.filtered[end,1] --> roughly(y_true[end, 1], atol=3*sqrt(lm_filt.error_cov[1,1,end])) + lm_smooth = kalman_smooth(lm_filt) + stderr = sqrt(lm_smooth.error_cov[1,1,:][:]) + @fact lm_filt.filtered[end,:] --> lm_smooth.smoothed[end,:] + @fact all(abs(y_true - lm_smooth.smoothed[:,1]) .< 3*stderr) --> true + @fact lm_smooth.smoothed[1,2] --> roughly(lm_smooth.smoothed[end, 2], atol=1e-12) + end + end @@ -154,24 +173,12 @@ facts("Kalman Filter") do #= display(lineplot(collect(1:size(x, 1)) / fs, vec(smooth.smoothed[1:end, 2]), width = 120, title="Smoothed State 2: $(x_est[2])")) =# end - end - context("Linear regression test") do - m, b, s, dt = 5, 2, 2, .1 - t = 0:dt:10 - y_true = m*t + b - y_noisy = y_true + s*randn(length(t)) - lm = StateSpaceModel([1 dt; 0 1], zeros(2,2), [1. 0], s*eye(1), zeros(2), 100*eye(2)) - lm_filt = kalman_filter(y_noisy, lm) - @fact lm_filt.filtered[end,1] --> roughly(y_true[end], atol=4*sqrt(lm_filt.error_cov[1,1,end])) - lm_smooth = kalman_smooth(y_noisy, lm) - @fact all((y_true - lm_smooth.smoothed[:,1]) .< 4*sqrt(lm_smooth.error_cov[1,1,:][:])) --> true - end - - context("Correct failure") do + @fact_throws StateSpaceModel([1 0.1; 0 1], zeros(2,3), zeros(2,2), + [1. 0; 0 0; -1 0], [1. 1 0 0; 1 0 1 0; 0 0 1 1], s*eye(3), zeros(2), 100*eye(2)) end -end +end From 14eff81d4483ecddf7df29885aaae69450fc60f5 Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Sun, 18 Oct 2015 12:16:01 -0400 Subject: [PATCH 4/7] Added Durbin-Koopman smoothing and relevant tests --- src/kalman_filter.jl | 26 ++++----- src/kalman_smooth.jl | 122 +++++++++++++++++++++++++++++++++++++++++- test/kalman_filter.jl | 28 +++++++--- 3 files changed, 153 insertions(+), 23 deletions(-) diff --git a/src/kalman_filter.jl b/src/kalman_filter.jl index 55d3e2c..134c7c2 100644 --- a/src/kalman_filter.jl +++ b/src/kalman_filter.jl @@ -1,20 +1,20 @@ type KalmanFiltered{T} - filtered::Array{T} - predicted::Array{T} - error_cov::Array{T} - pred_error_cov::Array{T} - model::StateSpaceModel - y::Array{T} - u::Array{T} - loglik::T + filtered::Array{T} + predicted::Array{T} + error_cov::Array{T} + pred_error_cov::Array{T} + model::StateSpaceModel + y::Array{T} + u::Array{T} + loglik::T end function show{T}(io::IO, filt::KalmanFiltered{T}) - n = size(filt.y, 1) - dx, dy = filt.model.nx, filt.model.ny - println("KalmanFiltered{$T}") - println("$n observations, $dx-D process x $dy-D observations") - println("Negative log-likelihood: $(filt.loglik)") + n = size(filt.y, 1) + dx, dy = filt.model.nx, filt.model.ny + println("KalmanFiltered{$T}") + println("$n observations, $dx-D process x $dy-D observations") + println("Negative log-likelihood: $(filt.loglik)") end function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros(size(y,1), model.nu)) diff --git a/src/kalman_smooth.jl b/src/kalman_smooth.jl index c78436e..9f1dd42 100644 --- a/src/kalman_smooth.jl +++ b/src/kalman_smooth.jl @@ -9,6 +9,15 @@ type KalmanSmoothed{T} loglik::T end +type KalmanSmoothedMinimal{T} + y::Array{T} + smoothed::Array{T} + error_cov::Array{T} + u::Array{T} + model::StateSpaceModel + loglik::T +end + function show{T}(io::IO, smoothed::KalmanSmoothed{T}) n = size(smoothed.y, 1) dx, dy = smoothed.model.nx, smoothed.model.ny @@ -17,6 +26,117 @@ function show{T}(io::IO, smoothed::KalmanSmoothed{T}) println("Negative log-likelihood: $(smoothed.loglik)") end +function show{T}(io::IO, 
smoothed::KalmanSmoothedMinimal{T}) + n = size(smoothed.y, 1) + dx, dy = smoothed.model.nx, smoothed.model.ny + println("KalmanSmoothedMinimal{$T}") + println("$n observations, $dx-D process x $dy-D observations") + println("Negative log-likelihood: $(smoothed.loglik)") +end + +# Durbin-Koopman Smoothing +function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1), model.nu)) + + y_orig = copy(y) + y = y' + u = u' + n = size(y, 2) + + @assert !any(isnan(u)) + @assert size(y, 1) == model.ny + @assert size(u, 1) == model.nu + @assert size(u, 2) == n + + y_notnan = !isnan(y) + y = y .* y_notnan + + x_pred, V_pred = zeros(model.nx, n), zeros(model.nx, model.nx, n) + x_smooth, V_smooth = copy(x_pred), copy(V_pred) + x_pred_t, V_pred_t = model.x1, model.P1 + + innov = zeros(model.ny, n) + innov_cov_inv = zeros(model.ny, model.ny, n) + K = zeros(model.nx, model.ny, n) + + innov_t = zeros(model.ny) + innov_cov_t = zeros(model.ny, model.ny) + innov_cov_inv_t = copy(innov_cov_t) + Kt = zeros(model.nx, model.ny) + + Ft, Gt, Dt, Wt, ut = model.F(1), model.G(1), model.D, model.W, zeros(model.nu) + + log_likelihood = n*model.ny*log(2pi)/2 + marginal_likelihood(innov::Vector, innov_cov::Matrix, innov_cov_inv::Matrix) = + (dot(innov, innov_cov_inv * innov) + logdet(innov_cov))/2 + + # Prediction and filtering + for t = 1:n + + # Predict using last iteration's values + if t > 1 + x_pred_t = Ft * x_pred_t + model.B * ut + Kt * innov_t + V_pred_t = Ft * V_pred_t * (Ft - Kt * Gt)' + model.V + V_pred_t = (V_pred_t + V_pred_t')/2 + end #if + x_pred[:, t] = x_pred_t + V_pred[:, :, t] = V_pred_t + + # Check for and handle missing observation values + missing_obs = !all(y_notnan[:, t]) + if missing_obs + ynnt = y_notnan[:, t] + I1, I2 = diagm(ynnt), diagm(!ynnt) + Gt = I1 * model.G(t) + Dt = I1 * model.D + Wt = I1 * model.W * I1 + I2 * model.W * I2 + else + Gt = model.G(t) + end #if + + Ft = model.F(t) + ut = u[:, t] + + # Compute nessecary filtering quantities + innov_t = y[:, t] - Gt * x_pred_t - Dt * ut + innov_cov_t = Gt * V_pred_t * Gt' + Wt + innov_cov_inv_t = inv(innov_cov_t) + Kt = Ft * V_pred_t * Gt' * innov_cov_inv_t + + innov[:, t] = innov_t + innov_cov_inv[:, :, t] = innov_cov_inv_t + K[:, :, t] = Kt + log_likelihood += marginal_likelihood(innov_t, innov_cov_t, innov_cov_inv_t) + + # Reset Wt, Dt if nessecary + missing_obs && ((Dt, Wt) = (model.D, model.W)) + + end #for + + # Smoothing + r, N = zeros(model.nx), zeros(model.nx, model.nx) + + for t = n:-1:1 + + Gt = model.G(t) + innov_cov_inv_t = innov_cov_inv[:, :, t] + V_pred_t = V_pred[:, :, t] + L = model.F(t) - K[:, :, t] * Gt + + r = Gt' * innov_cov_inv_t * innov[:, t] + L' * r + N = Gt' * innov_cov_inv_t * Gt + L' * N * L + + x_smooth[:, t] = x_pred[:, t] + V_pred_t * r + V_smooth_t = V_pred_t - V_pred_t * N * V_pred_t + V_smooth[:, :, t] = (V_smooth_t + V_smooth_t')/2 + + end #for + + return KalmanSmoothedMinimal(y_orig, x_smooth', V_smooth, u', model, log_likelihood) + +end #smooth + + +# Rauch-Tung-Striebel Smoothing function kalman_smooth(filt::KalmanFiltered) n = size(filt.y, 1) @@ -39,5 +159,3 @@ function kalman_smooth(filt::KalmanFiltered) return KalmanSmoothed(x_pred', x_filt', x_smooth', P_smoov, model, filt.y, filt.u, filt.loglik) end -kalman_smooth(y::Array, model::StateSpaceModel; u=zeros(size(y,1), model.nu)) = - kalman_filter(y, model, u=u) |> kalman_smooth diff --git a/test/kalman_filter.jl b/test/kalman_filter.jl index 479001a..7c511e9 100644 --- a/test/kalman_filter.jl +++ b/test/kalman_filter.jl @@ -57,12 
+57,11 @@ facts("Kalman Filter") do context("Smoothing") do mod1 = build_model() x, y = TimeModels.simulate(mod1, 100) - smooth = kalman_smooth(y, mod1) - filt = kalman_filter(y, mod1) - smooth2 = kalman_smooth(filt) + smooth = kalman_filter(y, mod1) |> kalman_smooth + smooth2 = kalman_smooth(y, mod1) - @fact smooth.smoothed --> smooth2.smoothed + @fact smooth.smoothed --> roughly(smooth2.smoothed, atol=1e-2) end context("Model fitting") do @@ -77,21 +76,26 @@ facts("Kalman Filter") do y[1:9:end] = NaN y[100] = NaN filt = kalman_filter(y, mod1) - smooth = kalman_smooth(y, mod1) + smooth = kalman_smooth(filt) + smooth2 = kalman_smooth(y, mod1) @fact any(isnan(filt.filtered)) --> false @fact any(isnan(smooth.filtered)) --> false + @fact any(isnan(smooth2.smoothed)) --> false end context("Return sizes") do mod1 = build_model() x, y = TimeModels.simulate(mod1, 100) filt = kalman_filter(y, mod1) - smooth = kalman_smooth(y, mod1) + smooth = kalman_smooth(filt) + smooth2 = kalman_smooth(y, mod1) @fact size(filt.filtered) --> size(filt.predicted) @fact size(filt.filtered) --> size(smooth.filtered) @fact size(filt.filtered) --> size(smooth.smoothed) @fact size(filt.error_cov) --> size(smooth.error_cov) + @fact size(filt.filtered) --> size(smooth2.smoothed) + @fact size(filt.error_cov) --> size(smooth2.error_cov) end context("Linear regression test") do @@ -105,11 +109,19 @@ facts("Kalman Filter") do [1. 0; 0 0; -1 0], [1. 1 0 0; 1 0 1 0; 0 0 1 1], s*eye(3), zeros(2), 100*eye(2)) lm_filt = kalman_filter(y_noisy, lm, u=input) @fact lm_filt.filtered[end,1] --> roughly(y_true[end, 1], atol=3*sqrt(lm_filt.error_cov[1,1,end])) + lm_smooth = kalman_smooth(lm_filt) stderr = sqrt(lm_smooth.error_cov[1,1,:][:]) - @fact lm_filt.filtered[end,:] --> lm_smooth.smoothed[end,:] + @fact lm_filt.filtered[end,:] --> lm_smooth.smoothed[end,:] + @fact all(abs(y_true - lm_smooth.smoothed[:,1]) .< 3*stderr) --> true + @fact ones(t) * lm_smooth.smoothed[1,2] --> roughly(lm_smooth.smoothed[:, 2], atol=1e-12) + + # Repeat with DK smoother + lm_smooth = kalman_smooth(y_noisy, lm, u=input) + stderr = sqrt(lm_smooth.error_cov[1,1,:][:]) @fact all(abs(y_true - lm_smooth.smoothed[:,1]) .< 3*stderr) --> true - @fact lm_smooth.smoothed[1,2] --> roughly(lm_smooth.smoothed[end, 2], atol=1e-12) + @fact ones(t) * lm_smooth.smoothed[1,2] --> roughly(lm_smooth.smoothed[:, 2], atol=1e-12) + end end From 77579d9e26bf05dd2fd604028c60f18b5de4b18f Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Wed, 21 Oct 2015 13:25:24 -0400 Subject: [PATCH 5/7] Converted B, D matrices to time-dependent-functions --- src/kalman_filter.jl | 15 ++++++++------- src/kalman_smooth.jl | 9 +++++---- src/statespacemodel.jl | 42 +++++++++++++++++++----------------------- 3 files changed, 32 insertions(+), 34 deletions(-) diff --git a/src/kalman_filter.jl b/src/kalman_filter.jl index 134c7c2..89c1a22 100644 --- a/src/kalman_filter.jl +++ b/src/kalman_filter.jl @@ -23,10 +23,11 @@ function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=ze @assert size(y,2) == model.ny @assert size(u,2) == model.nu - function kalman_recursions(y_i::Vector{T}, u_i::Vector{T}, G_i::Matrix{T}, + function kalman_recursions(y_i::Vector{T}, u_i::Vector{T}, + G_i::Matrix{T}, D_i::Matrix{T}, x_pred_i::Vector{T}, P_pred_i::Matrix{T}) if !any(isnan(y_i)) - innov = y_i - G_i * x_pred_i - model.D * u_i + innov = y_i - G_i * x_pred_i - D_i * u_i S = G_i * P_pred_i * G_i' + model.W # Innovation covariance K = P_pred_i * G_i' / S # Kalman gain x_filt_i = x_pred_i + K * innov 
@@ -53,16 +54,16 @@ function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=ze F_1 = model.F(1) x_pred[:, 1] = model.x1 P_pred[:, :, 1] = model.P1 - x_filt[:, 1], P_filt[:,:,1], dll = kalman_recursions(y[:, 1], u[:, 1], model.G(1), - model.x1, model.P1) + x_filt[:, 1], P_filt[:,:,1], dll = kalman_recursions(y[:, 1], u[:, 1], + model.G(1), model.D(1), model.x1, model.P1) log_likelihood += dll for i=2:n F_i1 = model.F(i) - x_pred[:, i] = F_i1 * x_filt[:, i-1] + model.B * u[:, i-1] + x_pred[:, i] = F_i1 * x_filt[:, i-1] + model.B(i-1) * u[:, i-1] P_pred[:, :, i] = F_i1 * P_filt[:, :, i-1] * F_i1' + model.V - x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], u[:, i], model.G(i), - x_pred[:,i], P_pred[:,:,i]) + x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], u[:, i], + model.G(i), model.D(i), x_pred[:,i], P_pred[:,:,i]) log_likelihood += dll end diff --git a/src/kalman_smooth.jl b/src/kalman_smooth.jl index 9f1dd42..e5958d5 100644 --- a/src/kalman_smooth.jl +++ b/src/kalman_smooth.jl @@ -74,7 +74,7 @@ function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1 # Predict using last iteration's values if t > 1 - x_pred_t = Ft * x_pred_t + model.B * ut + Kt * innov_t + x_pred_t = Ft * x_pred_t + model.B(t-1) * ut + Kt * innov_t V_pred_t = Ft * V_pred_t * (Ft - Kt * Gt)' + model.V V_pred_t = (V_pred_t + V_pred_t')/2 end #if @@ -87,10 +87,11 @@ function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1 ynnt = y_notnan[:, t] I1, I2 = diagm(ynnt), diagm(!ynnt) Gt = I1 * model.G(t) - Dt = I1 * model.D + Dt = I1 * model.D(t) Wt = I1 * model.W * I1 + I2 * model.W * I2 else Gt = model.G(t) + Dt = model.D(t) end #if Ft = model.F(t) @@ -107,8 +108,8 @@ function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1 K[:, :, t] = Kt log_likelihood += marginal_likelihood(innov_t, innov_cov_t, innov_cov_inv_t) - # Reset Wt, Dt if nessecary - missing_obs && ((Dt, Wt) = (model.D, model.W)) + # Reset Wt if nessecary + missing_obs && (Wt = model.W) end #for diff --git a/src/statespacemodel.jl b/src/statespacemodel.jl index 2f0a7d3..b847984 100644 --- a/src/statespacemodel.jl +++ b/src/statespacemodel.jl @@ -2,12 +2,12 @@ immutable StateSpaceModel{T <: Real} # Process transition matrix, control matrix, and noise covariance F::Function - B::Matrix{T} + B::Function V::Matrix{T} # Observation matrix, feed-forward matrix, and noise covariance G::Function - D::Matrix{T} + D::Function W::Matrix{T} # Inital state and error covariance conditions @@ -19,22 +19,22 @@ immutable StateSpaceModel{T <: Real} ny::Int nu::Int - function StateSpaceModel(F::Function, B::Matrix{T}, V::Matrix{T}, - G::Function, D::Matrix{T}, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) + function StateSpaceModel(F::Function, B::Function, V::Matrix{T}, + G::Function, D::Function, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) ispossemidef(x::Matrix) = issym(x) && (eigmin(x) >= 0) @assert ispossemidef(V) @assert ispossemidef(W) @assert ispossemidef(P1) - nx, ny, nu = confirm_matrix_sizes(F(1), B, V, G(1), D, W, x1, P1) + nx, ny, nu = confirm_matrix_sizes(F(1), B(1), V, G(1), D(1), W, x1, P1) new(F, B, V, G, D, W, x1, P1, nx, ny, nu) end end # Time-dependent definitions -function StateSpaceModel{T}(F::Function, B::Matrix{T}, V::Matrix{T}, - G::Function, D::Matrix{T}, W::Matrix{T}, +function StateSpaceModel{T}(F::Function, B::Function, V::Matrix{T}, + G::Function, D::Function, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) StateSpaceModel{T}(F, B, V, G, D, 
W, x1, P1) end @@ -42,8 +42,8 @@ end function StateSpaceModel{T}(F::Function, V::Matrix{T}, G::Function, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) - B = zeros(size(V, 1), 1) - D = zeros(size(W, 1), 1) + B(_) = zeros(size(V, 1), 1) + D(_) = zeros(size(W, 1), 1) StateSpaceModel{T}(F, B, V, G, D, W, x1, P1) end @@ -51,14 +51,14 @@ end function StateSpaceModel{T}(F::Matrix{T}, B::Matrix{T}, V::Matrix{T}, G::Matrix{T}, D::Matrix{T}, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) - StateSpaceModel{T}(_->F, B, V, _->G, D, W, x1, P1) + StateSpaceModel{T}(_->F, _->B, V, _->G, _->D, W, x1, P1) end function StateSpaceModel{T}(F::Matrix{T}, V::Matrix{T}, G::Matrix{T}, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) - B = zeros(size(V, 1), 1) - D = zeros(size(W, 1), 1) + B(_) = zeros(size(V, 1), 1) + D(_) = zeros(size(W, 1), 1) StateSpaceModel{T}(_->F, B, V, _->G, D, W, x1, P1) end @@ -67,18 +67,14 @@ function show{T}(io::IO, mod::StateSpaceModel{T}) println("StateSpaceModel{$T}, $dx-D process x $dy-D observations") println("Process evolution matrix F:") show(mod.F) - if any(mod.B .!= 0) - println("\n\nControl input matrix B:") - show(mod.B) - end #if + println("\n\nControl input matrix B:") + show(mod.B) println("\n\nProcess error covariance V:") show(mod.V) println("\n\nObservation matrix G:") show(mod.G) - if any(mod.D .!= 0) - println("\n\nFeed-forward matrix D:") - show(mod.D) - end #if + println("\n\nFeed-forward matrix D:") + show(mod.D) println("\n\nObseration error covariance W:") show(mod.W) end @@ -130,10 +126,10 @@ function simulate(model::StateSpaceModel, n::Int; u::Array=zeros(n, model.nu)) # Generate the series x[:, 1] = model.x1 - y[:, 1] = model.G(1) * model.x1 + model.D * u[:, 1] + W_chol * randn(model.ny) + y[:, 1] = model.G(1) * model.x1 + model.D(1) * u[:, 1] + W_chol * randn(model.ny) for i=2:n - x[:, i] = model.F(i-1) * x[:, i-1] + model.B * u[:, i-1] + V_chol * randn(model.nx) - y[:, i] = model.G(i) * x[:, i] + model.D * u[:, i] + W_chol * randn(model.ny) + x[:, i] = model.F(i-1) * x[:, i-1] + model.B(i-1) * u[:, i-1] + V_chol * randn(model.nx) + y[:, i] = model.G(i) * x[:, i] + model.D(i) * u[:, i] + W_chol * randn(model.ny) end return x', y' From aa8b48fedb7dac0079a48650d04dc65fe314ad00 Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Wed, 21 Oct 2015 13:58:53 -0400 Subject: [PATCH 6/7] Converted V, W to time-dependent functions --- src/kalman_filter.jl | 12 +++++++----- src/kalman_smooth.jl | 25 +++++++++++-------------- src/statespacemodel.jl | 40 ++++++++++++++++++++-------------------- test/kalman_filter.jl | 6 +++--- 4 files changed, 41 insertions(+), 42 deletions(-) diff --git a/src/kalman_filter.jl b/src/kalman_filter.jl index 89c1a22..b1c06eb 100644 --- a/src/kalman_filter.jl +++ b/src/kalman_filter.jl @@ -24,11 +24,11 @@ function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=ze @assert size(u,2) == model.nu function kalman_recursions(y_i::Vector{T}, u_i::Vector{T}, - G_i::Matrix{T}, D_i::Matrix{T}, + G_i::Matrix{T}, D_i::Matrix{T}, W_i::Matrix{T}, x_pred_i::Vector{T}, P_pred_i::Matrix{T}) if !any(isnan(y_i)) innov = y_i - G_i * x_pred_i - D_i * u_i - S = G_i * P_pred_i * G_i' + model.W # Innovation covariance + S = G_i * P_pred_i * G_i' + W_i # Innovation covariance K = P_pred_i * G_i' / S # Kalman gain x_filt_i = x_pred_i + K * innov P_filt_i = (I - K * G_i) * P_pred_i @@ -55,15 +55,17 @@ function kalman_filter{T}(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=ze x_pred[:, 1] = model.x1 P_pred[:, :, 1] = model.P1 x_filt[:, 1], P_filt[:,:,1], 
dll = kalman_recursions(y[:, 1], u[:, 1], - model.G(1), model.D(1), model.x1, model.P1) + model.G(1), model.D(1), model.W(1), + model.x1, model.P1) log_likelihood += dll for i=2:n F_i1 = model.F(i) x_pred[:, i] = F_i1 * x_filt[:, i-1] + model.B(i-1) * u[:, i-1] - P_pred[:, :, i] = F_i1 * P_filt[:, :, i-1] * F_i1' + model.V + P_pred[:, :, i] = F_i1 * P_filt[:, :, i-1] * F_i1' + model.V(i-1) x_filt[:, i], P_filt[:,:,i], dll = kalman_recursions(y[:, i], u[:, i], - model.G(i), model.D(i), x_pred[:,i], P_pred[:,:,i]) + model.G(i), model.D(i), model.W(i), + x_pred[:,i], P_pred[:,:,i]) log_likelihood += dll end diff --git a/src/kalman_smooth.jl b/src/kalman_smooth.jl index e5958d5..34e2476 100644 --- a/src/kalman_smooth.jl +++ b/src/kalman_smooth.jl @@ -63,7 +63,7 @@ function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1 innov_cov_inv_t = copy(innov_cov_t) Kt = zeros(model.nx, model.ny) - Ft, Gt, Dt, Wt, ut = model.F(1), model.G(1), model.D, model.W, zeros(model.nu) + Ft, Gt, Dt, Wt, ut = model.F(1), model.G(1), model.D(1), model.W(1), zeros(model.nu) log_likelihood = n*model.ny*log(2pi)/2 marginal_likelihood(innov::Vector, innov_cov::Matrix, innov_cov_inv::Matrix) = @@ -75,28 +75,28 @@ function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1 # Predict using last iteration's values if t > 1 x_pred_t = Ft * x_pred_t + model.B(t-1) * ut + Kt * innov_t - V_pred_t = Ft * V_pred_t * (Ft - Kt * Gt)' + model.V + V_pred_t = Ft * V_pred_t * (Ft - Kt * Gt)' + model.V(t) V_pred_t = (V_pred_t + V_pred_t')/2 end #if x_pred[:, t] = x_pred_t V_pred[:, :, t] = V_pred_t + # Assign new values + Ft = model.F(t) + Gt = model.G(t) + Dt = model.D(t) + Wt = model.W(t) + ut = u[:, t] + # Check for and handle missing observation values missing_obs = !all(y_notnan[:, t]) if missing_obs ynnt = y_notnan[:, t] I1, I2 = diagm(ynnt), diagm(!ynnt) - Gt = I1 * model.G(t) - Dt = I1 * model.D(t) - Wt = I1 * model.W * I1 + I2 * model.W * I2 - else - Gt = model.G(t) - Dt = model.D(t) + Gt, Dt = I1 * Gt, I1 * Dt + Wt = I1 * Wt * I1 + I2 * Wt * I2 end #if - Ft = model.F(t) - ut = u[:, t] - # Compute nessecary filtering quantities innov_t = y[:, t] - Gt * x_pred_t - Dt * ut innov_cov_t = Gt * V_pred_t * Gt' + Wt @@ -108,9 +108,6 @@ function kalman_smooth(y::Array, model::StateSpaceModel; u::Array=zeros(size(y,1 K[:, :, t] = Kt log_likelihood += marginal_likelihood(innov_t, innov_cov_t, innov_cov_inv_t) - # Reset Wt if nessecary - missing_obs && (Wt = model.W) - end #for # Smoothing diff --git a/src/statespacemodel.jl b/src/statespacemodel.jl index b847984..550b54e 100644 --- a/src/statespacemodel.jl +++ b/src/statespacemodel.jl @@ -3,12 +3,12 @@ immutable StateSpaceModel{T <: Real} # Process transition matrix, control matrix, and noise covariance F::Function B::Function - V::Matrix{T} + V::Function # Observation matrix, feed-forward matrix, and noise covariance G::Function D::Function - W::Matrix{T} + W::Function # Inital state and error covariance conditions x1::Vector{T} @@ -19,31 +19,31 @@ immutable StateSpaceModel{T <: Real} ny::Int nu::Int - function StateSpaceModel(F::Function, B::Function, V::Matrix{T}, - G::Function, D::Function, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) + function StateSpaceModel(F::Function, B::Function, V::Function, + G::Function, D::Function, W::Function, x1::Vector{T}, P1::Matrix{T}) ispossemidef(x::Matrix) = issym(x) && (eigmin(x) >= 0) - @assert ispossemidef(V) - @assert ispossemidef(W) + @assert ispossemidef(V(1)) + @assert ispossemidef(W(1)) @assert 
ispossemidef(P1) - nx, ny, nu = confirm_matrix_sizes(F(1), B(1), V, G(1), D(1), W, x1, P1) + nx, ny, nu = confirm_matrix_sizes(F(1), B(1), V(1), G(1), D(1), W(1), x1, P1) new(F, B, V, G, D, W, x1, P1, nx, ny, nu) end end # Time-dependent definitions -function StateSpaceModel{T}(F::Function, B::Function, V::Matrix{T}, - G::Function, D::Function, W::Matrix{T}, +function StateSpaceModel{T}(F::Function, B::Function, V::Function, + G::Function, D::Function, W::Function, x1::Vector{T}, P1::Matrix{T}) StateSpaceModel{T}(F, B, V, G, D, W, x1, P1) end -function StateSpaceModel{T}(F::Function, V::Matrix{T}, - G::Function, W::Matrix{T}, +function StateSpaceModel{T}(F::Function, V::Function, + G::Function, W::Function, x1::Vector{T}, P1::Matrix{T}) - B(_) = zeros(size(V, 1), 1) - D(_) = zeros(size(W, 1), 1) + B(_) = zeros(size(V(1), 1), 1) + D(_) = zeros(size(W(1), 1), 1) StateSpaceModel{T}(F, B, V, G, D, W, x1, P1) end @@ -51,7 +51,7 @@ end function StateSpaceModel{T}(F::Matrix{T}, B::Matrix{T}, V::Matrix{T}, G::Matrix{T}, D::Matrix{T}, W::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) - StateSpaceModel{T}(_->F, _->B, V, _->G, _->D, W, x1, P1) + StateSpaceModel{T}(_->F, _->B, _->V, _->G, _->D, _->W, x1, P1) end function StateSpaceModel{T}(F::Matrix{T}, V::Matrix{T}, @@ -59,7 +59,7 @@ function StateSpaceModel{T}(F::Matrix{T}, V::Matrix{T}, x1::Vector{T}, P1::Matrix{T}) B(_) = zeros(size(V, 1), 1) D(_) = zeros(size(W, 1), 1) - StateSpaceModel{T}(_->F, B, V, _->G, D, W, x1, P1) + StateSpaceModel{T}(_->F, B, _->V, _->G, D, _->W, x1, P1) end function show{T}(io::IO, mod::StateSpaceModel{T}) @@ -121,15 +121,15 @@ function simulate(model::StateSpaceModel, n::Int; u::Array=zeros(n, model.nu)) # Cholesky decompositions of the covariance matrices, for generating # random noise - V_chol = chol(model.V, Val{:L}) - W_chol = chol(model.W, Val{:L}) + V_chol(t) = chol(model.V(t), Val{:L}) + W_chol(t) = chol(model.W(t), Val{:L}) # Generate the series x[:, 1] = model.x1 - y[:, 1] = model.G(1) * model.x1 + model.D(1) * u[:, 1] + W_chol * randn(model.ny) + y[:, 1] = model.G(1) * model.x1 + model.D(1) * u[:, 1] + W_chol(1) * randn(model.ny) for i=2:n - x[:, i] = model.F(i-1) * x[:, i-1] + model.B(i-1) * u[:, i-1] + V_chol * randn(model.nx) - y[:, i] = model.G(i) * x[:, i] + model.D(i) * u[:, i] + W_chol * randn(model.ny) + x[:, i] = model.F(i-1) * x[:, i-1] + model.B(i-1) * u[:, i-1] + V_chol(i-1) * randn(model.nx) + y[:, i] = model.G(i) * x[:, i] + model.D(i) * u[:, i] + W_chol(1) * randn(model.ny) end return x', y' diff --git a/test/kalman_filter.jl b/test/kalman_filter.jl index 7c511e9..5f1f1e8 100644 --- a/test/kalman_filter.jl +++ b/test/kalman_filter.jl @@ -25,11 +25,11 @@ end # Time varying model function sinusoid_model(omega::Real; fs::Int=256, x0=[0.5, -0.5], W::AbstractFloat=0.1) F(n) = [1.0 0; 0 1.0] - V = diagm([1e-10, 1e-10]) + V(n) = diagm([1e-10, 1e-10]) G(n) = [cos(2*pi*omega*(1/fs)*n) -sin(2*pi*omega*(1/fs)*n)] - W = diagm([W]) + w(n) = diagm([W]) P0 = diagm([1e-1, 1e-1]) - StateSpaceModel(F, V, G, W, x0, P0) + StateSpaceModel(F, V, G, w, x0, P0) end From c8582b4b206034fab7744e9939c30b2cf6d2df20 Mon Sep 17 00:00:00 2001 From: Gord Stephen Date: Wed, 21 Oct 2015 14:29:02 -0400 Subject: [PATCH 7/7] Update NEWS.md for 0.1.0 --- NEWS.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/NEWS.md b/NEWS.md index c12f115..496c74d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,11 @@ +### 0.1.0 + +* Only supports Julia 0.4.x +* Adds exogenous inputs in state space model transition and observation equations +* 
Allows all system matrices (transition, control input, transition noise covariance, +observation, feed-forward, observation noise covariance) to be time-dependent +* Implements Durbin-Koopman Kalman smoothing algorithm + ### 0.0.3 * support floor of Julia 0.3 and non-inclusive ceiling of Julia 0.5-
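
Editor's note: the series above replaces the matrix-valued F/G fields with generator functions, adds exogenous inputs u, and provides a Durbin-Koopman smoother alongside the Rauch-Tung-Striebel one. The following is a minimal usage sketch of that refactored API; the model matrices and sizes are illustrative values only (not taken from the patches) and assume the constructors and keyword arguments exactly as introduced in src/statespacemodel.jl, src/kalman_filter.jl and src/kalman_smooth.jl.

# Usage sketch (illustrative values only -- not part of the patches above).
using TimeModels

dt = 0.1
F  = [1.0 dt; 0.0 1.0]      # state transition (local linear trend)
V  = 1e-6 * eye(2)          # process noise covariance
G  = [1.0 0.0]              # observation matrix
W  = 2.0 * eye(1)           # observation noise covariance
x1 = zeros(2)               # initial state
P1 = 100.0 * eye(2)         # initial state covariance

# Constant matrices are promoted to generator functions by the
# time-invariant convenience constructor.
model = StateSpaceModel(F, V, G, W, x1, P1)

x, y = TimeModels.simulate(model, 200)   # draw a realization
filt = kalman_filter(y, model)           # forward Kalman filter
rts  = kalman_smooth(filt)               # Rauch-Tung-Striebel smoother
dk   = kalman_smooth(y, model)           # Durbin-Koopman smoother (handles NaN observations)

println(filt)                            # dimensions and negative log-likelihood

# Exogenous inputs enter through the u keyword, as in the tests,
# e.g. kalman_filter(y, model, u=input).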