# This file contains the code necessary to reproduce the analysis in
# 'CollocInfer: A Library for Collocation Inference in Differential Equation
# Models' by Giles Hooker, Jim Ramsay and Luo Xiao.
# The code below is segmented in the order it appears in the paper. It is also
# segmented into three examples, separated by horizontal lines of ####. We have
# indicated which (sub)section each part of the code is produced by.
library("CollocInfer")
library("deSolve")
################################################################################
## Sections 1 and 2 -- the FitzHugh Nagumo Equations
# Section 1.2 -- Introduction to the equations
# To ensure reproducibility (set.seed coerces its argument to an integer)
set.seed((2004 * 2007)/2014)
# The first section of this demo file provides the code to generate simulation
# data from the FitzHugh-Nagumo model. We will delete it later and call a
# standard set of data with data(FhNdata). This is for the purposes of providing
# reproducible results; the data we will use were generated with exactly the
# commands below.
# First we will define some variable and parameter names:
# V = voltage-like variable, R = recovery variable
FhNvarnames <- c("V", "R")
FhNparnames <- c("a", "b", "c")
# and initial conditions and parameters
x0 <- c(-1, 1)
names(x0) <- FhNvarnames
FhNpars <- c(0.2, 0.2, 3)
names(FhNpars) <- FhNparnames
# The following is a function specifying the FitzHugh-Nagumo model in a form that
# lsoda works with
# FitzHugh-Nagumo right-hand side in the signature deSolve::lsoda expects.
#
# times: current time (unused; required by the lsoda interface)
# x:     named state vector with elements "V" and "R"
# p:     named parameter vector with elements "a", "b" and "c"
#
# Returns a one-element list containing the derivative vector, as lsoda requires.
fhn.ode <- function(times, x, p) {
  deriv <- x
  dimnames(deriv) <- dimnames(x)
  V <- x["V"]
  R <- x["R"]
  deriv["V"] <- p["c"] * (V - V^3/3 + R)
  deriv["R"] <- -(V - p["a"] + p["b"] * R)/p["c"]
  list(deriv)
}
# We need the times at which we will observe the system
FhNplottimes <- seq(0, 20, 0.05)
# And now we can create solutions to the equations
out <- lsoda(x0, times = FhNplottimes, fhn.ode, FhNpars)
# and plot what the solutions look like: Figure 1, left panel
# (column 1 of 'out' is time, columns 2:3 are V and R)
par(mar = c(5, 5, 1, 1))
matplot(out[, 1], out[, 2:3], type = "l", xlab = "time", ylab = "(V,R)", lwd = 3,
cex.lab = 2.5, cex.axis = 2.5)
legend("bottomleft", c("V", "R"), lwd = 3, col = 1:2, lty = 1:2, cex = 1.5)
# Figure 1, right panel: the limit cycle in the (V, R) phase plane
par(mar = c(5, 5, 1, 1))
plot(out[, 2], out[, 3], type = "l", lwd = 3, xlab = "V", ylab = "R", cex.lab = 2.5,
cex.axis = 2.5)
# We now add some noise to the values of the curves at a reduced set of sampling
# times:
FhNtimes <- seq(0, 20, 0.5)
FhNn <- length(FhNtimes)
out <- lsoda(x0, times = FhNtimes, fhn.ode, FhNpars)
FhNdata <- out[, 2:3] + 0.1 * matrix(rnorm(2 * FhNn), FhNn, 2)
## Section 1.3
# The following functions provide an example B-spline basis to show the reader
FhNrange <- c(0, 20)
breaks <- seq(0, 20, 4)
ExampleBasis <- create.bspline.basis(FhNrange, norder = 4, breaks = breaks)
# Figure 2:
par(mar = c(5, 6, 1, 1))
plot(ExampleBasis, xlab = "time", ylab = "Phi(t)", lwd = 2, cex.lab = 2.5, cex.axis = 2.5)
## Section 1.4
# Let's undo all this work and call a standard data set generated from this code
# with which to run the profiling procedures.
rm(list = ls())
# NOTE(review): rm(list = ls()) deletes FhNtimes, FhNpars, FhNvarnames, etc.,
# which are used below -- this relies on data(FhNdata) restoring all of these
# objects from the package; confirm the data set bundles them.
data(FhNdata)
# In order to run the profiling procedures, we need to define some objects.
# The following code will define a basis with a knot at every observation time
FhNrange <- c(0, 20)
breaks <- seq(0, 20, 0.5)
norder <- 4
FhNbasis <- create.bspline.basis(range = FhNrange, norder = norder, breaks = breaks)
# And this will smooth the data (second-derivative roughness penalty, lambda = 1)
FhNfdPar <- fdPar(FhNbasis, int2Lfd(2), 1)
DEfd0 <- smooth.basis(FhNtimes, FhNdata, FhNfdPar)$fd
# and produce a plot of how well this agrees with the smooth
pdf("FhNplotfit.pdf", width = 8, height = 6)
par(mfrow = c(2, 1), mar = c(5, 5, 2, 1))
plotfit.fd(FhNdata, FhNtimes, DEfd0, cex.axis = 2.5, cex.lab = 2.5, lwd = 2, cex = 1.5)
dev.off()
### Section 3
# Section 3.1
# We can now extract the coefficients, which we will also require
coefs0 <- DEfd0$coef
colnames(coefs0) <- FhNvarnames
# CollocInfer requires the right hand side function to be defined somewhat
# differently to lsoda. Here we allow a matrix of values as input
# FitzHugh-Nagumo right-hand side in CollocInfer form: the state arrives as a
# matrix (rows = evaluation points, columns = "V", "R") and a derivative matrix
# of the same shape is returned.
#
# times: vector of evaluation times (unused by these autonomous equations)
# x:     state matrix with columns "V" and "R"
# p:     named parameter vector with elements "a", "b" and "c"
# more:  extra arguments required by the CollocInfer interface (unused)
fhn.fun <- function(times, x, p, more) {
  V <- x[, "V"]
  R <- x[, "R"]
  deriv <- x
  deriv[, "V"] <- p["c"] * (V - V^3/3 + R)
  deriv[, "R"] <- -(V - p["a"] + p["b"] * R)/p["c"]
  deriv
}
# Now we can choose a trade-off parameter and set up the objects that the
# profiling functions will use.
lambda <- 1000
profile.obj <- LS.setup(pars = FhNpars, fn = fhn.fun, lambda = lambda, times = FhNtimes,
coefs = coefs0, basisvals = FhNbasis)
proc <- profile.obj$proc
lik <- profile.obj$lik
## Section 3.2
## Gradient matching can be obtained through ParsMatchOpt and produces useful
## initial parameter estimates
Pres0 <- ParsMatchOpt(FhNpars, coefs0, proc)
pars1 <- Pres0$pars
## Section 3.3
## Inner optimisation to smooth the data using the differential equation as a
## penalty
Ires1 <- inneropt(FhNdata, times = FhNtimes, pars1, coefs0, lik, proc)
coefs1 <- Ires1$coefs
# Alternatively, Smooth.LS avoids the need to run LS.setup, and it also returns
# the lik and proc objects.
Ires1.2 <- Smooth.LS(fhn.fun, FhNdata, FhNtimes, FhNpars, coefs0, FhNbasis, lambda)
# And outer optimization to choose parameters
Ores2 <- outeropt(FhNdata, FhNtimes, pars1, coefs1, lik, proc)
# Here the relevant objects to extract are parameter estimates and the
# corresponding coefficients.
pars2 <- Ores2$pars
coefs2 <- Ores2$coefs
# Alternatively, Profile.LS will do both setup and profiling
Ores2.2 <- Profile.LS(fhn.fun, FhNdata, FhNtimes, FhNpars, coefs0, FhNbasis, lambda)
## Section 3.4: Forwards Prediction Error can be used to choose lambda
# We need to define a matrix where each row gives the index of the observation
# time to start solving the ODE from, and the observation time to stop.
whichtimes <- cbind(1:31, 11:41)
# then we call
FPE <- forward.prediction.error(FhNtimes, FhNdata, coefs2, lik, proc, pars2, whichtimes)
# Usually we would do this over a selection of lambda values
# Figure 4: Left panel:
par(mar = c(5, 5, 1, 1))
matplot(FhNtimes, FhNdata, pch = FhNvarnames, cex = 1.5, cex.lab = 2.5, cex.axis = 2.5)
lambdas <- 10^(0:7)
FPEs <- 0 * lambdas
# Warm starts: each fit begins from the previous lambda's estimates
temp.pars <- FhNpars
temp.coefs <- coefs0
for (ilam in 1:length(lambdas)) {
print(paste("lambda = ", lambdas[ilam]))
t.Ores <- Profile.LS(fhn.fun, FhNdata, FhNtimes, temp.pars, temp.coefs, FhNbasis,
lambdas[ilam])
print(t.Ores$pars)
temp.pars <- t.Ores$pars
temp.coefs <- t.Ores$coefs
# The commented-out condition below restricts plotting to a subset of lambdas;
# as written, every fitted trajectory is added to the plot.
# if(is.element(ilam,c(1,4,8))){
t.fd <- fd(t.Ores$coefs, FhNbasis)
lines(t.fd, lwd = 2, col = ceiling(ilam/2), lty = 1)
# }
FPEs[ilam] <- forward.prediction.error(FhNtimes, FhNdata, t.Ores$coefs, lik,
proc, t.Ores$pars, whichtimes)
}
legend("bottomleft", c("lambda = 1", "lambda = 1e3", "lambda = 1e7"), col = c(1,
2, 4), lwd = 2, cex = 1.2)
# Figure 4, right panel, Forward Prediction Errors:
FPEs
par(mar = c(5, 5, 1, 1))
plot(log10(lambdas), FPEs, type = "l", lwd = 2, col = 4, cex.lab = 2.5, cex.axis = 2.5)
## Section 3.5
# FPE makes use of a function IntegrateForward which interfaces a proc object to
# lsoda. We can use it directly to solve the ODE at the estimated parameters and
# compare this to data as follows (assuming we know x0):
x0 <- c(-1, 1)
names(x0) <- FhNvarnames
sol1 <- IntegrateForward(x0, FhNtimes, Ores2$pars, proc)
# Figure 5:
par(mar = c(5, 5, 1, 1))
matplot(FhNtimes, FhNdata, pch = c("V", "R"), cex = 1.5, cex.lab = 2.5, cex.axis = 2.5)
matplot(sol1$times, sol1$states, type = "l", lwd = 3, add = TRUE)
## Section 3.6
# Some diagnostic plots help to work out if there are problems with the model fit
# to the data or with the smooth fit to the model.
# Figure 6:
out1 <- CollocInferPlots(Ores2$coefs, Ores2$pars, lik, proc, times = FhNtimes, data = FhNdata,
cex.lab = 2.5, cex.axis = 2.5, cex = 1.5, lwd = 3, newplot = TRUE)
## Section 3.7
# Covariance of parameters can be obtained from
covar <- Profile.covariance(Ores2$pars, times = FhNtimes, data = FhNdata, coefs = Ores2$coefs,
lik = lik, proc = proc)
covar
# and we can look at confidence intervals: approximate Wald intervals,
# estimate +/- 2 standard errors
CIs <- cbind(Ores2$pars - 2 * sqrt(diag(covar)), Ores2$pars + 2 * sqrt(diag(covar)))
rownames(CIs) <- FhNparnames
CIs
## Section 3.8
## When only some state variables are observed, we can still conduct profiling.
## To mimic this, first set the second column of data to be NA
data2 <- FhNdata
data2[, 2] <- NA
# and remember that we also don't get coefficients, we'll set these to zero
coefs0.2 <- coefs0
coefs0.2[, 2] <- 0
# The function FitMatchOpt allows us to pull some columns of the coefficient
# matrix into line with the differential equation (keeping the other columns
# fixed):
Fres3 <- FitMatchOpt(coefs0.2, 2, pars1, proc)
# And we can now proceed with profiling as before:
# NOTE(review): the full FhNdata is passed here although data2 (with column 2
# set to NA) was just constructed to mimic a partially observed system --
# confirm whether data2 was intended.
Ores4 <- outeropt(FhNdata, FhNtimes, pars1, Fres3$coefs, lik, proc)
Ores4$pars
out2 <- CollocInferPlots(Ores4$coefs, Ores4$pars, lik, proc, times = FhNtimes, data = data2,
newplot = TRUE)
covar4 <- Profile.covariance(Ores4$pars, times = FhNtimes, data = data2, coefs = Ores4$coefs,
lik = lik, proc = proc)
CI4 <- cbind(Ores4$pars - 2 * sqrt(diag(covar4)), Ores4$pars + 2 * sqrt(diag(covar4)))
rownames(CI4) <- FhNparnames
CI4
###############################################################################
## Section 3.9: Chemostat Data and the Rosenzweig-MacArthur Model
# The per-individual growth rates for the Rosenzweig-Macarthur model are given
# below:
# Per-capita Rosenzweig-MacArthur rates for the chemostat example, in
# CollocInfer form. Both the parameters and the states are passed on the
# log scale and exponentiated on entry.
#
# t:    vector of evaluation times (unused; autonomous system)
# x:    log-scale state matrix with columns "C" (algae) and "B" (rotifers)
# p:    log-scale named parameters: rho, kappaC, gamma, kappaB, chi, delta
# more: extra arguments required by the CollocInfer interface (unused)
RosMac <- function(t, x, p, more) {
  p <- exp(p)
  x <- exp(x)
  C <- x[, "C"]
  B <- x[, "B"]
  rates <- x
  # logistic growth minus saturating (type II) predation
  rates[, "C"] <- p["rho"] * (1 - C/p["kappaC"]) - p["gamma"] * B/(p["kappaB"] + C)
  # conversion of consumed algae minus mortality/washout
  rates[, "B"] <- p["chi"] * p["gamma"] * C/(p["kappaB"] + C) - p["delta"]
  rates
}
# We can now obtain some data (from Becks et. al., 2010, Ecology Letters)
data(ChemoRMData)
time <- ChemoRMTime
# work with log-transformed observations throughout this example
data <- log(ChemoRMData)
# Figure 7:
par(mar = c(5, 5, 1, 1))
matplot(time, data, pch = c("C", "B"), xlab = "days", ylab = "log(Obs)", cex.lab = 2.5,
cex.axis = 2.5, cex = 1.5)
# and set up some names for the variables and the parameters
varnames <- RMvarnames
parnames <- RMparnames
# We now define some basis function on the new range of time
rr <- range(time)
breaks <- seq(rr[1], rr[2], by = 1)
# NOTE(review): 'mids' is computed here but never used below -- possibly a
# leftover from an earlier version of this example.
mids <- c(min(breaks), (breaks[1:(length(breaks) - 1)] + 0.5), max(breaks))
ChemoRMbasis <- create.bspline.basis(rr, norder = 4, breaks = breaks)
# An initial set of parameters and coefficients
coef0 <- smooth.basis(time, data, fdPar(ChemoRMbasis, int2Lfd(2), 10))$fd$coef
colnames(coef0) <- varnames
pars <- log(ChemoRMPars)
names(pars) <- parnames
# We now list the indices of the parameters that we wish to estimate:
activepars <- 3:6
# And set up some profiling objects; note one lambda per state variable
out <- LS.setup(pars = pars, coefs = coef0, basisvals = ChemoRMbasis, fn = RosMac,
lambda = c(50000, 500), times = time)
lik <- out$lik
proc <- out$proc
# We will start by running Gradient Matching
res1 <- ParsMatchOpt(pars, coef0, proc, active = 3:6)
# And with these we will run the profiling procedure
res2 <- outeropt(data, time, res1$pars, coef0, lik, proc, active = activepars)
# and plot the results (Figure 8)
pdf("Chemoplot3.pdf")
out2 <- CollocInferPlots(res2$coefs, res2$pars, lik, proc, times = time, data = data,
cex.lab = 2.5, cex.axis = 2.5, cex = 1.5, lwd = 3, newplot = TRUE)
dev.off()
## Now we can look at the covariance (restricted to the active parameters)
covar <- Profile.covariance(res2$pars, times = time, data = data, coefs = res2$coefs,
lik = lik, proc = proc, active = activepars)
# approximate Wald intervals on the log scale, exponentiated for reporting
CIs <- cbind(res2$pars[activepars] - 2 * sqrt(diag(covar)), res2$pars[activepars] +
2 * sqrt(diag(covar)))
rownames(CIs) <- parnames[activepars]
exp(CIs)
# We can also examine forward prediction error
whichtimes <- cbind(1:102, 6:107)
# multiplicative factors applied to the baseline lambda pair
lambdafac <- c(0.1, 0.5, 1, 5, 10)
FPEs <- 0 * lambdafac
temp.pars <- res2$pars
temp.coefs <- res2$coefs
for (ilam in 1:length(lambdafac)) {
t.res <- Profile.LS(RosMac, data, time, temp.pars, temp.coefs, ChemoRMbasis,
lambdafac[ilam] * c(50000, 500), active = activepars, out.meth = "nlminb")
FPEs[ilam] <- forward.prediction.error(time, data, t.res$coefs, lik, proc, t.res$pars,
whichtimes)
temp.pars <- t.res$pars
temp.coefs <- t.res$coefs
}
FPEs
# Three-species Rosenzweig-MacArthur equations (two algal clones C1, C2 and a
# rotifer predator B) in CollocInfer form. Parameters arrive on the log scale
# and are exponentiated; the states are used as-is (the log transform of the
# states is handled elsewhere via posproc = TRUE).
#
# t:    vector of evaluation times (unused; autonomous system)
# x:    state matrix with columns "C1", "C2" and "B"
# p:    log-scale named parameters: pi, rho1, rho2, kappaC1, kappaC2, gamma,
#       chi, kappaB, delta
# more: extra arguments required by the CollocInfer interface (unused)
RosMac2 <- function(t, x, p, more) {
  p <- exp(p)
  C1 <- x[, "C1"]
  C2 <- x[, "C2"]
  B <- x[, "B"]
  # shared saturating denominator of the predation terms; clone 1 is
  # discounted by the palatability parameter pi
  denom <- p["kappaB"] + p["pi"] * C1 + C2
  # joint density dependence of the two algal clones
  crowding <- 1 - C1/p["kappaC1"] - C2/p["kappaC2"]
  deriv <- x
  deriv[, "C1"] <- p["rho1"] * C1 * crowding - p["pi"] * p["gamma"] * C1 * B/denom
  deriv[, "C2"] <- p["rho2"] * C2 * crowding - p["gamma"] * C2 * B/denom
  deriv[, "B"] <- p["chi"] * p["gamma"] * (p["pi"] * C1 + C2) * B/denom - p["delta"] * B
  deriv
}
# We will define some interesting parameters
RMpars <- c(0.2, 0.025, 0.125, 22000, 1e+05, 5e+06, 1, 1e+09, 0.3)
RMParnames <- c("pi", "rho1", "rho2", "kappaC1", "kappaC2", "gamma", "chi", "kappaB",
"delta")
# Which we represent on the log scale
logpars <- log(RMpars)
names(logpars) <- RMParnames
# And we also need some initial conditions (with named entries)
RMVarnames <- c("C1", "C2", "B")
x0 <- c(50, 50, 2)
names(x0) <- RMVarnames
# Section 4.1
# Now we need to produce some data. The following version of RosMac2 is suitable
# for use with lsoda. Note that we represent these models on the log scale:
# the state passed to the solver is z = log(x).
# lsoda-compatible version of the three-species Rosenzweig-MacArthur model.
# The state z and the parameters p are both stored on the log scale; the
# function exponentiates them, evaluates the original equations, and returns
# d(log x)/dt = (dx/dt)/x as deSolve requires (a one-element list).
#
# t: current time (unused; autonomous system)
# z: named log-scale state vector with elements "C1", "C2" and "B"
# p: log-scale named parameters: pi, rho1, rho2, kappaC1, kappaC2, gamma,
#    chi, kappaB, delta
RosMac2ODE <- function(t, z, p) {
  p <- exp(p)
  x <- exp(z)
  C1 <- x["C1"]
  C2 <- x["C2"]
  B <- x["B"]
  # shared saturating denominator of the predation terms
  denom <- p["kappaB"] + p["pi"] * C1 + C2
  dx <- x
  dx["C1"] <- p["rho1"] * C1 * (1 - C1/p["kappaC1"] - C2/p["kappaC2"]) -
    p["pi"] * p["gamma"] * C1 * B/denom
  dx["C2"] <- p["rho2"] * C2 * (1 - C2/p["kappaC2"] - C1/p["kappaC1"]) -
    p["gamma"] * C2 * B/denom
  dx["B"] <- p["chi"] * p["gamma"] * (p["pi"] * C1 + C2) * B/denom -
    p["delta"] * B
  # dividing by x converts dx/dt into the derivative of the log state
  list(dx/x)
}
# With this we can solve the ODE at 200 successive days
time <- 0:200
res0 <- lsoda(log(x0), time, RosMac2ODE, p = logpars)
# We'll obtain data by adding noise to these solutions
# (201 time points x 3 state variables = 603 noise draws)
data <- res0[, 2:4] + 0.2 * matrix(rnorm(603), 201, 3)
# and name the columns
colnames(data) <- RMVarnames
# This gives us the following plot (Figure 9)
matplot(data, cex.lab = 2.5, cex.axis = 2.5, cex = 1.5, xlab = "days", pch = c("1",
"2", "B"))
matplot(res0[, 2:4], type = "l", lwd = 3, add = TRUE)
# Now we need to set up the CollocInfer machinery
# First we'll define a basis with breaks each time point
rr <- range(time)
breaks <- seq(rr[1], rr[2], by = 1)
RMbasis <- create.bspline.basis(rr, norder = 4, breaks = breaks)
# And obtain an initial set of parameters and coefficients from smoothing
coef0 <- smooth.basis(time, data, fdPar(RMbasis, int2Lfd(2), 10))$fd$coef
colnames(coef0) <- RMVarnames
# We will now create the profiling objects, but use the log transformation by
# setting posproc=TRUE
out <- LS.setup(pars = logpars, coefs = coef0, basisvals = RMbasis, fn = RosMac2,
lambda = 1e+05, times = time, posproc = TRUE)
lik <- out$lik
proc <- out$proc
# We'll do gradient matching to get parameter estimates corresponding to this
# smooth
res1 <- ParsMatchOpt(logpars, coef0, proc)
# And now profiling
res3 <- outeropt(data, time, res1$pars, coef0, lik, proc)
# Let's have a look at the parameters that we obtained (back on the natural scale)
exp(res3$pars)
# and the agreement with data and model (Figure 10)
out3 <- CollocInferPlots(res3$coefs, res3$pars, lik, proc, times = time, data = data,
cex.lab = 2.5, cex.axis = 2.5, cex = 1.5, lwd = 3, newplot = TRUE)
## Section 4.2
## Now we will complicate the model. In reality, we only observe the sum of C1 and
## C2.
data2 <- cbind(log(exp(data[, "C1"]) + exp(data[, "C2"])), data[, "B"])
# Figure 11:
matplot(data2, cex.lab = 2.5, cex.axis = 2.5, pch = c("C", "B"), cex = 1.5, xlab = "days")
# To deal with this, we need to define a transformation function from our state
# variables to the expected observations. In this case (since the states are on
# the log scale) we exponentiate to get back to the original scale, add C1 and C2
# and then take the log again.
# Observation transformation for the partially observed model: the states are
# on the log scale, so exponentiate, pool the two algal clones into the single
# observable concentration, and return both observables on the log scale.
#
# t:    vector of evaluation times (unused)
# x:    log-scale state matrix with columns "C1", "C2" and "B"
# p:    parameter vector (unused)
# more: extra arguments required by the CollocInfer interface (unused)
RMobsfn <- function(t, x, p, more) {
  states <- exp(x)
  obs <- cbind(states[, "C1"] + states[, "C2"], states[, "B"])
  log(obs)
}
# We can now create new profiling objects that incorporate this transformation
# function by specifying likfn
out <- LS.setup(pars = logpars, coefs = coef0, basisvals = RMbasis, fn = RosMac2,
lambda = 1e+05, times = time, posproc = TRUE, likfn = RMobsfn)
lik2 <- out$lik
proc2 <- out$proc
# To see how we perform in this situation, we'll start by setting the two columns
# of the data smooth to zero
coef02 <- coef0
coef02[, 1:2] <- 0
# We'll now pull these columns into line with the rotifer column (which we can
# still smooth).
Fres3 <- FitMatchOpt(coef02, 1:2, res1$pars, proc2)
# And we'll run profiling and have a look at what we get.
res32 <- outeropt(data2, time, res1$pars, Fres3$coefs, lik2, proc2)
exp(res32$pars)
# Diagnostic plots (Figure 12):
out32 <- CollocInferPlots(res32$coefs, res32$pars, lik2, proc2, times = time, data = data2,
datanames = c("C", "B"), cex.lab = 2.5, cex.axis = 2.5, cex = 1.5, lwd = 3, newplot = TRUE)
################################################################################
## Section 4.3
# In this framework it is not unreasonable to expect that we have repeated
# experiments. When these are very well structured and all have common
# observation times, this can be easily accommodated in CollocInfer (it can
# accommodate less regular replicated experiments, but requires work to set
# things up manually).
# We'll create a second experiment starting from new initial conditions
x03 <- c(15, 25, 4)
names(x03) <- RMVarnames
res03 <- lsoda(log(x03), time, RosMac2ODE, p = logpars)
data03 <- res03[, 2:4] + 0.2 * matrix(rnorm(603), 201, 3)
# and set up a three dimensional array in which the experiment number is the
# second dimension and the third dimension is the variable being measured (this
# is to agree with conventions in the fda package)
alldat <- array(0, c(201, 2, 3))
alldat[, 1, ] <- data
alldat[, 2, ] <- data03
# Now we'll smooth the second experiment
coef3 <- smooth.basis(time, data03, fdPar(RMbasis, int2Lfd(2), 10))$fd$coef
# and create a three-dimensional array with all the coefficients together
coefs <- array(0, c(dim(coef3)[1], 2, 3))
coefs[, 1, ] <- coef0
coefs[, 2, ] <- coef3
# These three dimensional arrays can be given to LS.setup which understands this
# structure and knows what to do with it.
out <- LS.setup(pars = logpars, coefs = coefs, basisvals = RMbasis, fn = RosMac2,
lambda = 1e+05, times = time, data = alldat, posproc = TRUE, names = RMVarnames)
lik3 <- out$lik
proc3 <- out$proc
# We can now call the inner optimisation to use a model-based smooth
res13 <- inneropt(data = out$data, times = out$times, pars = res1$pars, coefs = out$coefs,
lik = lik3, proc = proc3)
# And use profiling to estimate parameters
res33 <- outeropt(data = out$data, times = out$times, res1$pars, res13$coefs, lik3,
proc3)
# These parameters should hopefully be closer to the truth than with only one
# experimental run
exp(res33$pars)
# And we can also examine the fit to the data and model. In this case, the times
# vector wraps around creating a few unpleasant graphical effects.
# NOTE(review): datanames = c("B", "C") reverses the labelling order used in
# the earlier plots (c("C", "B")) -- confirm which ordering is intended.
out3 <- CollocInferPlots(res33$coefs, res33$pars, lik3, proc3, times = out$times,
data = out$data, datanames = c("B", "C"), cex.lab = 2.5, cex.axis = 2.5, cex = 1.5,
lwd = 3, newplot = TRUE)
################################################################################
## Appendix B: Manual Setup
# Here we create lik2 and proc2 (corresponding to the indirectly observed
# single-run experiment above) in order to demonstrate their structure. Note that
# this code is out of order from the paper so that the Rosenzweig-MacArthur model
# example can be kept in one section.
# First we need to specify the matrices of basis values that we will use at
# observation time points
bvals.obs <- eval.basis(time, RMbasis)
# and quadrature times, these are midpoints between breaks plus the end points
# The quadrature weights are all equal, but we have multiplied them by the lambda
# that we are using, in this case 1e5.
qpts <- c(breaks[1], breaks[1:(length(breaks) - 1)] + diff(breaks)/2, breaks[length(breaks)])
qwts <- 1e+05 * matrix(1, length(qpts), 3)/length(qpts)
# basis values for proc is a list: the basis and its first derivative evaluated
# at the quadrature points
bvals.quad <- list(bvals = eval.basis(qpts, RMbasis), dbvals = eval.basis(qpts, RMbasis,
1))
# Now we create the lik object
# make.SSElik() sets up the squared error criteria
lik.m <- make.SSElik()
# Attach the values of the basis expansion at the observation time points
lik.m$bvals <- bvals.obs
# We need to specify the transformation of the state variables that is to be
# compared with the data. In this case, we will use finite differencing to
# compute the derivatives that we need; we can achieve this by first employing
# make.findif.ode()
lik.m$more <- make.findif.ode()
# and then telling findif.ode that the function it is finite differencing is
# RMobsfn
lik.m$more$more <- list(fn = RMobsfn, eps = 1e-06, more = NULL)
# We also need to give lik.m a set of weights. This has to occur in the more
# element of lik.m because it is used inside lik.m$fn.
lik.m$more$weights <- array(1, dim(data))
# We can also create the proc object manually. First we call make.SSEproc() in
# order to set up the squared error functions
proc.m <- make.SSEproc()
# We also specify the basis values and their derivatives at the quadrature points
proc.m$bvals <- bvals.quad
# Now we need to tell SSEproc about the right hand side of the ODE and its
# derivatives. Here we will use finite differencing again, as in lik.m:
proc.m$more <- make.findif.ode()
# In fact, we are going to finite difference the right hand side of the ODE
# for the log transformed data. Here we specify the log transform
proc.m$more$more <- list(fn = make.logtrans()$fn, eps = 1e-06)
# and then give it the (non log-transform) Rosenzweig-MacArthur equations
proc.m$more$more$more$fn <- RosMac2
# proc.m$more also needs to include some elements for internal processing. In
# particular the following specify the quadrature points and weights
proc.m$more$weights <- qwts
proc.m$more$qpts <- qpts
# We will also tell it about the parameter and variable names.
proc.m$more$parnames <- RMParnames
proc.m$more$names <- RMVarnames
# And let's check that this all works
Fres.m3 <- FitMatchOpt(coef02, 1:2, res1$pars, proc.m)
# NOTE(review): this call passes Fres3$coefs and lik2 rather than the manually
# built Fres.m3$coefs and lik.m -- confirm whether the manual objects were
# intended here (lik.m is used in the plotting call below).
res.m32 <- outeropt(data2, time, res1$pars, Fres3$coefs, lik2, proc.m)
exp(res.m32$pars)
out.m32 <- CollocInferPlots(res.m32$coefs, res.m32$pars, lik.m, proc.m, times = time,
data = data2)
# If you compare this to out32, you should have the same estimates.
###############################################################################
## Section 6: The Henon Example
# This example demonstrates the use of CollocInfer on a discrete-time system.
# The Henon Map is a classical dynamical system that exhibits chaos. Its
# equations are given as
#   x[t+1] = 1 - a*x[t]^2 + y[t]
#   y[t+1] = b*x[t]
# To ensure reproducibility
set.seed((2004 * 2007)/2014)
# First we will choose some parameters (a = 1.4, b = 0.3, the classical values)
hpars <- c(1.4, 0.3)
# And generate some trajectories in discrete time
ntimes <- 200
x <- c(-1, 1)
# Iterate the map for ntimes + 20 steps; the first 20 are discarded below as
# burn-in so the retained trajectory lies on the attractor.
X <- matrix(0, ntimes + 20, 2)
X[1, ] <- x
for (i in 2:(ntimes + 20)) {
X[i, ] <- make.Henon()$ode(i, X[i - 1, ], hpars, NULL)
}
X <- X[20 + 1:ntimes, ]
# Let's have a look at this (Figure 14):
par(mar = c(5, 5, 1, 1))
plot(X, xlab = "x", ylab = "y", cex.lab = 2.5, cex.axis = 2.5, cex = 1.5)
# And now we'll add on some noise to produce data
Y <- X + 0.05 * matrix(rnorm(ntimes * 2), ntimes, 2)
t <- 1:ntimes
# Starting off coefficients estimates can just be the observations in this case:
coefs <- as.matrix(Y)
# Now we'll use a set of starting parameters that aren't those we generated the
# data with.
hpars2 <- c(1.3, 0.4) # Perturbed parameters
# NOTE(review): hpars was created without names, so this assignment is a no-op;
# confirm whether named parameters were intended.
names(hpars2) <- names(hpars)
# Basic value for lambda -- note that Forwards Prediction Error does not make
# sense for discrete systems
lambda <- 10000
# We'll run smoothing and profiling on these data with squared error to begin
# with. discrete = TRUE tells CollocInfer this is a map, not an ODE.
Ires1 <- Smooth.LS(make.Henon(), data = Y, times = t, pars = hpars2, coefs = coefs,
basisvals = NULL, lambda = lambda, in.meth = "nlminb", discrete = TRUE)
Ores1 <- Profile.LS(make.Henon(), data = Y, t, pars = hpars2, coefs, basisvals = NULL,
lambda = lambda, in.meth = "nlminb", out.meth = "nls", discrete = TRUE)
# Code to generate plots from Figure 15:
# Extract parameters from profiling
parest <- Ores1$pars
# and coefficients; in this case these are our trajectory
Fit <- Ores1$coef
# Create a two-panel plot; one for each component of the state vector. Here we
# plot the trajectory and the data:
par(mfrow = c(2, 1), mar = c(5, 5, 1, 1))
plot(t, Fit[, 1], type = "l", ylab = "x", xlab = "time", lty = 2, cex.lab = 2.5,
cex.axis = 2.5, lwd = 3)
points(t, Y[, 1], col = 2, pch = 8, cex = 1.5)
plot(t, Fit[, 2], type = "l", lty = 2, ylab = "y", xlab = "time", cex.lab = 2.5,
cex.axis = 2.5, lwd = 3)
points(t, Y[, 2], col = 2, cex = 1.5, pch = 8)
# We can also compare prediction and new estimate -- equivalent of derivative
# plots
# One step ahead predictions
pred <- Ores1$proc$more$fn(t, Fit, Ores1$pars, Ores1$proc$more$more)
# Plot predictions along with the next value of the trajectory (hence removing
# the first time point in Fit).
par(mfrow = c(2, 1), mar = c(5, 5, 1, 1))
plot(t, pred[, 1], type = "l", ylab = "x", xlab = "time", cex.lab = 2.5, cex.axis = 2.5,
lwd = 3)
points(t[-ntimes], Fit[-1, 1], col = 2, type = "l", cex = 1.5, pch = 8)
plot(t, pred[, 2], type = "l", ylab = "y", xlab = "time", cex.lab = 2.5, cex.axis = 2.5,
lwd = 3)
points(t[-ntimes], Fit[-1, 2], col = 2, type = "l", cex = 1.5, pch = 8)
# and we can also look at how far apart these are -- in this case we won't join
# them up with lines.
par(mar = c(5, 5, 1, 1))
matplot(t[-ntimes], Fit[-1, ] - pred[-ntimes, ], xlab = "time", ylab = "Model Residuals",
cex.lab = 2.5, cex.axis = 2.5, pch = c("x", "y"), cex = 1.5)
# A demonstration of the use of functions for multivariate normal log likelihood
# -- Smooth.multinorm and Profile.multinorm work in exactly the same way that
# Smooth.LS and Profile.LS do.
var <- c(1, 0.01)
Ires3 <- Smooth.multinorm(make.Henon(), data = Y, t, pars = hpars2, coefs, basisvals = NULL,
var = var, in.meth = "nlminb", discrete = TRUE)
# Fixed: removed a stray empty argument (', ,') after 'active = NULL' that
# passed a spurious missing positional argument to Profile.multinorm.
Ores3 <- Profile.multinorm(fn = make.Henon(), data = Y, times = t, pars = hpars2,
coefs = coefs, basisvals = NULL, var = var, fd.obj = NULL, more = NULL, quadrature = NULL,
in.meth = "nlminb", out.meth = "optim", eps = 1e-06, active = NULL, discrete = TRUE)
Ores3$pars