Update to 1.0 #71

Open · wants to merge 1 commit into master
7 changes: 6 additions & 1 deletion .travis.yml
@@ -4,10 +4,15 @@ sudo: false
os:
- linux
julia:
- 0.6
- 1.0
- 1.1
- nightly
notifications:
email: false
matrix:
fast_finish: true
allow_failures:
- julia: nightly
addons:
apt:
packages:
23 changes: 23 additions & 0 deletions Project.toml
@@ -0,0 +1,23 @@
name = "TimeModels"
uuid = "69470c01-11c1-51f0-b04c-03fbc3a7dfa2"
authors = ["Andrey Kolev", "Sam Urmy", "Theodore Papamarkou", "Algorithm Alpha LLC", "Gord Stephen", "Robert Luke"]
version = "0.7.0"

[deps]
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd"
Optim = "429524aa-4258-5aef-a3af-852621145aeb"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
TimeSeries = "9e3dc215-6440-5c97-bce1-76c03772f85e"

[extras]
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["Random", "Statistics", "Test"]
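
Project.toml replaces the deleted REQUIRE file under the Julia 1.0 package manager. A minimal sketch (not from this PR) of exercising the new project file from a local checkout, using only standard Pkg calls:

using Pkg
Pkg.activate(".")    # use the package's own Project.toml as the active environment
Pkg.instantiate()    # resolve and install everything listed under [deps]
Pkg.test()           # run the test suite with the [targets] test extras
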
7 changes: 0 additions & 7 deletions REQUIRE

This file was deleted.

12 changes: 6 additions & 6 deletions src/ARIMA.jl
@@ -2,30 +2,30 @@
# vectors of AR and MA coefficients and a number of differences.
# This implementation is based on Section 8.3 in Brockwell and Davis 2002,
# "Introduction to Time Series and Forecasting," 2nd ed.
function arima_statespace(ar::Vector{T}, d::I, ma::Vector{T},
function arima_statespace(ar::Vector{T}, d::I, ma::Vector{T},
sigma::T) where {T, I<:Integer}
p = length(ar)
q = length(ma)
r = max(p, q + 1)

F = [zeros(r - 1) diagm(ones(r - 1)); [zeros(max(0, r - p)), reverse(ar)]']
V = diagm([zeros(r - 1), sigma])
F = [zeros(r - 1) diagm(0 => ones(r - 1)); [zeros(max(0, r - p)), reverse(ar)]']
V = diagm(0 => [zeros(r - 1), sigma])
G = [zeros(max(0, r - q - 1)), reverse(ma), 1]'
W = zeros(1, 1)
# differencing
if d > 0
A = [zeros(d - 1), 1]
B = [zeros(d - 1) diagm(ones(d - 1));
B = [zeros(d - 1) diagm(0 => ones(d - 1));
T[(-1.0)^(d + 1 - i) * binomial(d, d-i) for i in 0:(d - 1)]']
# redefine matrices to incorporate differencing into state
F = [F zeros(r, d); A*G B]
V = [V zeros(r, d); zeros(d, r + d)]
G = [G B[end, :]]
x0 = zeros(r + d)
P0 = diagm(ones(r + d) * 1e7)
P0 = diagm(0 => ones(r + d) * 1e7)
else
x0 = zeros()
P0 = diagm(ones(r) * 1e7)
P0 = diagm(0 => ones(r) * 1e7)
end
StateSpaceModel(F, V, G, W, x0, P0)
end
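
The diagm calls above follow the Julia 1.0 API, where diagm takes diagonal-index => values pairs and is exported from LinearAlgebra. A small sketch (not from this PR):

using LinearAlgebra
diagm(0 => ones(3))                  # 3x3 with ones on the main diagonal; 0.6 wrote diagm(ones(3))
diagm(0 => [1.0, 2.0], 1 => [5.0])   # 2x2 matrix [1.0 5.0; 0.0 2.0]
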
4 changes: 2 additions & 2 deletions src/GARCH.jl
@@ -29,7 +29,7 @@ function Base.show(io::IO ,fit::GarchFit)
jbstat,jbp = jbtest(fit.data./fit.sigma);
@printf io " Jarque-Bera Test\t\U1D6D8\u00B2\t%.6f\t%.6f\n\n" jbstat jbp
println(io," * Error Analysis:")
println(io," \t\tEstimate\t\Std.Error\tt value \tPr(>|t|)")
println(io," \t\tEstimate\tStd.Error\tt value \tPr(>|t|)")
@printf io " omega\t%f\t%f\t%f\t%f\n" fit.params[1] fit.secoef[1] fit.tval[1] prt(fit.tval[1])
@printf io " alpha\t%f\t%f\t%f\t%f\n" fit.params[2] fit.secoef[2] fit.tval[2] prt(fit.tval[2])
@printf io " beta \t%f\t%f\t%f\t%f\n" fit.params[3] fit.secoef[3] fit.tval[3] prt(fit.tval[3])
@@ -53,7 +53,7 @@ function cdHessian(par,LLH)
x4 = copy(par)
x4[i] -= eps[i]
x4[j] -= eps[j]
H[i,j] = (LLH(x1)-LLH(x2)-LLH(x3)+LLH(x4)) / (4.*eps[i]*eps[j])
H[i,j] = (LLH(x1) - LLH(x2) - LLH(x3) + LLH(x4)) / (4.0 * eps[i] * eps[j])
end
end
H
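
The 4.*eps[i] fix above, and the 1.-cdf(...) fix in diagnostic_tests.jl below, are needed because Julia 1.0 rejects a trailing-dot numeric literal followed by an operator as ambiguous. A sketch of the issue (not from this PR):

x = 2.0
4.0 * x    # fine
4 .* x     # fine, broadcast
# 4.*x     # syntax error on 1.0: ambiguous between 4.0 * x and 4 .* x
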
10 changes: 6 additions & 4 deletions src/TimeModels.jl
@@ -1,12 +1,14 @@
module TimeModels

using Base.Dates
using Dates
using Distributions
using LinearAlgebra
using NLopt
using Optim
using Printf
using SparseArrays
using StatsBase
using TimeSeries
using Optim
using NLopt
using Compat

import Base: show

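
The using changes above reflect the Julia 1.0 split of Base into standard-library packages, each of which must also appear under [deps] in Project.toml; Compat is no longer needed. A sketch (not from this PR):

using Dates, LinearAlgebra, Printf, SparseArrays
Dates.today()            # reachable via using Base.Dates on 0.6
@printf("%.3f\n", 2pi)   # the @printf macro now requires using Printf
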
3 changes: 1 addition & 2 deletions src/diagnostic_tests.jl
@@ -9,7 +9,6 @@ function jbtest(x::Vector)
b2 = (m4/m2^2)
statistic = n * b1/6 + n*(b2 - 3)^2/24
d = Chisq(2.)
pvalue = 1.-cdf(d,statistic)
pvalue = 1.0 - cdf(d,statistic)
statistic, pvalue
end

15 changes: 10 additions & 5 deletions src/kalman_filter.jl
@@ -17,6 +17,8 @@ function Base.show(io::IO, filt::KalmanFiltered{T}) where T
println("Negative log-likelihood: $(filt.loglik)")
end

kalman_filter(y::AbstractArray, args...; kwargs...) = kalman_filter(collect(y), args...; kwargs...)

function kalman_filter(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros(size(y,1), model.nu)) where T

@assert size(u,1) == size(y,1)
@@ -38,16 +40,19 @@ function kalman_filter(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros
P_filt_i = P_pred_i
dll = 0
end
# @show x_filt_i
# @show P_filt_i
# @show dll
return x_filt_i, P_filt_i, dll
end #kalman_recursions

y = y'
u = u'
n = size(y, 2)
x_pred = zeros(model.nx, n)
x_filt = zeros(x_pred)
x_filt = zeros(size(x_pred))
P_pred = zeros(model.nx, model.nx, n)
P_filt = zeros(P_pred)
P_filt = zeros(size(P_pred))
log_likelihood = n*model.ny*log(2pi)/2

# first iteration
@@ -69,7 +74,7 @@ function kalman_filter(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros
log_likelihood += dll
end

return KalmanFiltered(x_filt', x_pred', P_filt, P_pred, model, y', u', log_likelihood)
return KalmanFiltered(collect(x_filt'), collect(x_pred'), P_filt, P_pred, model, collect(y'), collect(u'), log_likelihood)
end

function loglikelihood(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros(size(y,1), model.nu)) where T
@@ -124,14 +129,14 @@ function loglikelihood(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros
missing_obs = !all(y_notnan[:, t])
if missing_obs
ynnt = y_notnan[:, t]
I1, I2 = spdiagm(ynnt), spdiagm(!ynnt)
I1, I2 = spdiagm(0 => ynnt), spdiagm(0 => !ynnt)
Ct, Dut = I1 * Ct, I1 * Dut
Wt = I1 * Wt * I1 + I2 * Wt * I2
end #if

# Compute nessecary filtering quantities
innov_t = y[:, t] - Ct * x_pred_t - Dut
innov_cov_t = Ct * P_pred_t * Ct' + Wt |> full
innov_cov_t = Ct * P_pred_t * Ct' + Wt |> Matrix
nonzero_innov_cov = all(diag(innov_cov_t) .!= 0)
innov_cov_inv_t = nonzero_innov_cov ? inv(innov_cov_t) : I0ny

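
Two 1.0 behaviours drive the collect(x') and |> Matrix changes above: x' is now a lazy Adjoint wrapper rather than a copy, and full() was removed in favour of the Matrix/Array constructors. A small sketch (not from this PR):

using LinearAlgebra, SparseArrays
A = [1.0 2.0; 3.0 4.0]
A'                   # lazy Adjoint view of A
collect(A')          # materialises a plain Array, as done before building KalmanFiltered
S = sparse(1.0I, 2, 2)
Matrix(S)            # dense copy; replaces the removed full(S)
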
38 changes: 23 additions & 15 deletions src/kalman_smooth.jl
@@ -61,7 +61,7 @@ function kalman_smooth(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros
y = y .* y_notnan

x_pred_t, P_pred_t = model.x1, model.P1
x, P = zeros(model.nx, n), Array{typeof(P_pred_t)}(0)
x, P = zeros(model.nx, n), Array{typeof(P_pred_t)}(undef, 0)

innov_t = zeros(model.ny)
innov_cov_t = zeros(model.ny, model.ny)
@@ -71,7 +71,7 @@ function kalman_smooth(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros

innov = zeros(model.ny, n)
innov_cov_inv = zeros(model.ny, model.ny, n)
KC = Array{typeof(KCt)}(0)
KC = Array{typeof(KCt)}(undef, 0)

ut, Ct, Wt = zeros(model.nu), model.C(1), model.W(1)

@@ -110,14 +110,14 @@ function kalman_smooth(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros
missing_obs = !all(y_notnan[:, t])
if missing_obs
ynnt = y_notnan[:, t]
I1, I2 = spdiagm(ynnt), spdiagm(.!ynnt)
I1, I2 = spdiagm(0 => ynnt), spdiagm(0 => .!ynnt)
Ct, Dut = I1 * Ct, I1 * Dut
Wt = I1 * Wt * I1 + I2 * Wt * I2
end #if

# Compute nessecary filtering quantities
innov_t = y[:, t] - Ct * x_pred_t - Dut
innov_cov_t = Ct * P_pred_t * Ct' + Wt |> full
innov_cov_t = Ct * P_pred_t * Ct' + Wt |> Matrix
nonzero_innov_cov = all(diag(innov_cov_t) .!= 0)
innov_cov_inv_t = nonzero_innov_cov ? inv(innov_cov_t) : I0ny

@@ -127,15 +127,15 @@ function kalman_smooth(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros
nonzero_innov_cov && (log_likelihood += marginal_likelihood(innov_t, innov_cov_t, innov_cov_inv_t))

end #for
push!(KC, zeros(KC[1]))
push!(KC, zeros(size(KC[1])))

# Smoothing
r = zeros(model.nx)
N = zeros(model.P1)
N = zeros(size(model.P1))

for t = n:-1:1

Ct = !all(y_notnan[:, t]) ? spdiagm(y_notnan[:, t]) * model.C(t) : model.C(t)
Ct = !all(y_notnan[:, t]) ? spdiagm(0 => y_notnan[:, t]) * model.C(t) : model.C(t)

CF = Ksparse ? sparse(Ct' * innov_cov_inv[:, :, t]) : Ct' * innov_cov_inv[:, :, t]
P_pred_t = P[t]
@@ -150,15 +150,15 @@ function kalman_smooth(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros

end #for

return KalmanSmoothedMinimal(y_orig, x', P, u_orig, model, log_likelihood)
return KalmanSmoothedMinimal(y_orig, collect(x'), P, u_orig, model, log_likelihood)

end #smooth

function lag1_smooth(y::Array{T}, u::Array{T}, m::StateSpaceModel{T}) where T

A_0, A_I = zeros(m.A(1)), eye(m.A(1))
B_0, V_0, C_0 = zeros(m.B(1)), zeros(m.V(1)), zeros(m.C(1))
x1_0, P1_0 = zeros(m.x1), zeros(m.P1)
A_0, A_I = zeros(size(m.A(1))), Matrix(1.0I, size(m.A(1)))
B_0, V_0, C_0 = zeros(size(m.B(1))), zeros(size(m.V(1))), zeros(size(m.C(1)))
x1_0, P1_0 = zeros(size(m.x1)), zeros(size(m.P1))

A_stack(t) = [m.A(t) A_0; A_I A_0]
B_stack(t) = [m.B(t); B_0]
@@ -188,10 +188,10 @@ function kalman_smooth(filt::KalmanFiltered)
model = filt.model
x_pred = filt.predicted'
x_filt = filt.filtered'
x_smooth = zeros(x_filt)
x_smooth = zeros(size(x_filt))
P_pred = filt.pred_error_cov
P_filt = filt.error_cov
P_smoov = zeros(P_filt)
P_smoov = zeros(size(P_filt))

x_smooth[:, n] = x_filt[:, n]
P_smoov[:, :, n] = P_filt[:, :, n]
@@ -201,6 +201,14 @@ function kalman_smooth(filt::KalmanFiltered)
P_smoov[:, :, i] = P_filt[:, :, i] + J * (P_smoov[:, :, i+1] - P_pred[:,:,i+1]) * J'
end

return KalmanSmoothed(x_pred', x_filt', x_smooth', P_smoov, model, filt.y, filt.u, filt.loglik)
return KalmanSmoothed(
collect(x_pred'),
collect(x_filt'),
collect(x_smooth'),
P_smoov,
model,
filt.y,
filt.u,
filt.loglik
)
end
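
kalman_smooth.jl also switches to the 1.0 constructors for uninitialised arrays and identity matrices. A sketch of the replacements (not from this PR):

using LinearAlgebra
A = rand(2, 2)
Array{Float64}(undef, 0)    # empty vector; the 0.6 spelling was Array{Float64}(0)
Matrix(1.0I, size(A)...)    # dense identity the same shape as A; replaces eye(A)
zeros(size(A))              # zeros(A) was removed; pass the dimensions instead
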

32 changes: 16 additions & 16 deletions src/parameter_estimation.jl
@@ -23,29 +23,30 @@ function fit(y::Array{T}, pmodel::ParametrizedSSM, params::SSMParameters;
u_orig = copy(u)
u = u'

I_nx = speye(pmodel.nx)
I0_nx = zeros(I_nx)
I_nx = sparse(1.0I, pmodel.nx, pmodel.nx)
I0_nx = zeros(size(I_nx))

I_ny = speye(pmodel.ny)
I0_ny = zeros(I_ny)
I_ny = sparse(1.0I, pmodel.ny, pmodel.ny)
I0_ny = zeros(size(I_ny))

I_nu = speye(pmodel.nu)
I_nu = sparse(1.0I, pmodel.nu, pmodel.nu)

# Requirement: zeros-rows in G and H remain consistent
x_deterministic = all(pmodel.G(1) .== 0, 2)
x_deterministic = all(pmodel.G(1) .== 0; dims=2)
all_x_deterministic = all(x_deterministic)

y_deterministic = all(pmodel.H(1) .== 0, 2)
y_deterministic = all(pmodel.H(1) .== 0; dims=2)
all_y_deterministic = all(y_deterministic)

x1_deterministic = all(pmodel.J .== 0, 2) |> full |> vec
x1_deterministic = all(pmodel.J .== 0; dims=2) |> Matrix |> vec
all_x1_deterministic = all(x1_deterministic)
Id_x1 = spdiagm(x1_deterministic)
Id_x1 = spdiagm(0 => x1_deterministic)

if any(x_deterministic)
OQ0, OQp = I_nx[find(x_deterministic), :], I_nx[find((!).(x_deterministic)), :]
OQ0 = I_nx[findall(vec(x_deterministic)), :]
OQp = I_nx[findall((!).(vec(x_deterministic))), :]
# Id(M_t::Array{Int,2}) = spdiagm(OQ0' * all((OQ0 * M_t * OQp') .== 0, 2)[:])
Id = M_t -> spdiagm(OQ0' * all((OQ0 * M_t * OQp') .== 0, 2)[:])
Id = M_t -> spdiagm(0 => OQ0' * all((OQ0 * M_t * OQp') .== 0; dims=2)[:])
else
# Id(_::Array{Int,2}) = I0_nx
Id = t -> I0_nx
@@ -106,8 +107,8 @@ function fit(y::Array{T}, pmodel::ParametrizedSSM, params::SSMParameters;

function N(t::Int)
HRHt = HRH(t)
O = I_ny[find(y_notnan[:, t] .& HRH_nonzero_rows(t)), :]
return I - HRHt * O' * inv(O * HRHt * O') * O
O = I_ny[findall(y_notnan[:, t] .& HRH_nonzero_rows(t)), :]
return I - HRHt * O' * inv(Array(O * HRHt * O')) * O
end #N

ex = exs[:, 1]
@@ -223,7 +224,7 @@ function fit(y::Array{T}, pmodel::ParametrizedSSM, params::SSMParameters;
end #if C

if estimate_R
I2 = diagm(.!y_notnan[:, t])
I2 = diagm(0 => .!y_notnan[:, t])
eyy = I2 * (Nt * HRH(t) + Nt * Ct * Pt * Ct' * Nt') * I2 + ey * ey'
R_S1 += pmodel.R.D' * pmodel.R.D
R_S2 += pmodel.R.D' * vec(xi(t) * (eyy - eyx * Ct' -
@@ -350,7 +351,7 @@ function fit(y::Array{T}, model::StateSpaceModel{T}; u::Array{T}=zeros(size(y,1)
D, D_params = parametrize_none(model.D(1))

# Q, R, P1 default to parametrized as diagonal with independent elements - any other values
# are ignored / set to zero
# are ignored / set to zero
Q, Q_params = parametrize_diag(diag(model.V(1)))
R, R_params = parametrize_diag(diag(model.W(1)))
P1, P1_params = parametrize_diag(diag(model.P1))
@@ -362,4 +363,3 @@
fit(y, pmodel, params, u=u, eps=eps, niter=niter)

end #fit
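
parameter_estimation.jl collects several more renamings. A sketch of the 0.6 to 1.0 replacements used above (not from this PR):

using LinearAlgebra, SparseArrays
sparse(1.0I, 4, 4)          # sparse identity; replaces speye(4)
v = [true, false, true]
findall(v)                  # returns [1, 3]; replaces find(v)
B = [0 0; 1 0]
all(B .== 0; dims=2)        # row-wise reduction; 0.6 wrote all(B .== 0, 2)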
