R Under development (unstable) (2024-04-29 r86495 ucrt) -- "Unsuffered Consequences"
Copyright (C) 2024 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> library(testthat)
> library(mvgam)
Loading required package: mgcv
Loading required package: nlme
This is mgcv 1.9-1. For overview type 'help("mgcv-package")'.
Loading required package: Rcpp
Loading required package: brms
Loading 'brms' package (version 2.21.0). Useful instructions
can be found by typing help('brms'). A more detailed introduction
to the package is available through vignette('brms_overview').

Attaching package: 'brms'

The following objects are masked from 'package:mgcv':

    s, t2

The following object is masked from 'package:stats':

    ar

Loading required package: marginaleffects
Loading required package: insight
Welcome to mvgam. Please cite as: Clark, NJ, and Wells, K. 2022.
Dynamic Generalized Additive Models (DGAMs) for forecasting discrete
ecological time series. Methods in Ecology and Evolution, 2022,
https://doi.org/10.1111/2041-210X.13974
> 
> test_check("mvgam")
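Editorial aside before the first generated Stan listing: the program below defines two helpers. `spd_cov_exp_quad()` evaluates the spectral density of a squared-exponential Gaussian-process kernel, and `rep_each()` mirrors base R's `rep(x, each = K)`. A minimal R sketch of the same two computations (the name `spd_se` and the example values are illustrative, not part of mvgam):

# Spectral density of a squared-exponential GP kernel, as in spd_cov_exp_quad():
# S(l) = alpha^2 * sqrt(2 * pi) * rho * exp(-0.5 * rho^2 * l^2)
spd_se <- function(l, alpha, rho) {
  alpha^2 * (sqrt(2 * pi) * rho) * exp(-0.5 * rho^2 * l^2)
}
spd_se(l = c(0.1, 0.5, 1), alpha = 1, rho = 2)

# The Stan rep_each() helper is equivalent to base R's rep():
rep(c(1, 2, 3), each = 2)  # 1 1 2 2 3 3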
// Stan model code generated by package mvgam
functions {
  /* Spectral density function of a Gaussian process
   * with squared exponential covariance kernel
   * Args:
   *   l_gp: numeric eigenvalues of an SPD GP
   *   alpha_gp: marginal SD parameter
   *   rho_gp: length-scale parameter
   * Returns:
   *   numeric values of the GP function evaluated at l_gp
   */
  vector spd_cov_exp_quad(data vector l_gp, real alpha_gp, real rho_gp) {
    int NB = size(l_gp);
    vector[NB] out;
    real constant = square(alpha_gp) * (sqrt(2 * pi()) * rho_gp);
    real neg_half_lscale2 = -0.5 * square(rho_gp);
    for (m in 1:NB) {
      out[m] = constant * exp(neg_half_lscale2 * square(l_gp[m]));
    }
    return out;
  }
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
}
data {
  int k_gp_trend_time_byrandom; // basis functions for approximate gp
  vector[k_gp_trend_time_byrandom] l_gp_trend_time_byrandom; // approximate gp eigenvalues
  array[23] int b_trend_idx_gp_time_byrandom; // gp basis coefficient indices
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_lv; // number of dynamic factors
  int n_sp; // number of smoothing parameters
  int n_sp_trend; // number of trend smoothing parameters
  int n_series; // number of series
  matrix[n_series, n_lv] Z; // matrix mapping series to latent states
  int num_basis; // total number of basis coefficients
  int num_basis_trend; // number of trend basis coefficients
  vector[num_basis_trend] zero_trend; // prior locations for trend basis coefficients
  vector[num_basis] zero; // prior locations for basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  matrix[n * n_lv, num_basis_trend] X_trend; // trend model design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  int k_gp_time_byrandom; // basis functions for approximate gp
  vector[k_gp_time_byrandom] l_gp_time_byrandom; // approximate gp eigenvalues
  array[41] int b_idx_gp_time_byrandom; // gp basis coefficient indices
  array[n, n_lv] int ytimes_trend;
  int n_nonmissing; // number of nonmissing observations
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
}
transformed data {
}
parameters {
  // gp term sd parameters
  real alpha_gp_trend_time_byrandom;
  // gp term length scale parameters
  real rho_gp_trend_time_byrandom;
  // gp term latent variables
  vector[k_gp_trend_time_byrandom] z_gp_trend_time_byrandom;
  // raw basis coefficients
  vector[num_basis] b_raw;
  // gp term sd parameters
  real alpha_gp_time_byrandom;
  // gp term length scale parameters
  real rho_gp_time_byrandom;
  // gp term latent variables
  vector[k_gp_time_byrandom] z_gp_time_byrandom;
  vector[num_basis_trend] b_raw_trend;
  // latent state SD terms
  vector[n_lv] sigma;
  // Beta precision parameters
  vector[n_series] phi;
  // latent states
  matrix[n, n_lv] LV;
  // smoothing parameters
  vector[n_sp] lambda;
  vector[n_sp_trend] lambda_trend;
}
transformed parameters {
  // latent states and loading matrix
  vector[n * n_lv] trend_mus;
  matrix[n, n_series] trend;
  // basis coefficients
  vector[num_basis] b;
  vector[num_basis_trend] b_trend;
  // observation model basis coefficients
  b[1:num_basis] = b_raw[1:num_basis];
  b[b_idx_gp_time_byrandom] = sqrt(spd_cov_exp_quad(l_gp_time_byrandom,
                                                    alpha_gp_time_byrandom,
                                                    rho_gp_time_byrandom))
                              .* z_gp_time_byrandom;
  // process model basis coefficients
  b_trend[1:num_basis_trend] = b_raw_trend[1:num_basis_trend];
  b_trend[b_trend_idx_gp_time_byrandom] = sqrt(spd_cov_exp_quad(l_gp_trend_time_byrandom,
                                                                alpha_gp_trend_time_byrandom,
                                                                rho_gp_trend_time_byrandom))
                                          .* z_gp_trend_time_byrandom;
  // latent process linear predictors
  trend_mus = X_trend * b_trend;
  // derived latent states
  for (i in 1:n) {
    for (s in 1:n_series) {
      trend[i, s] = dot_product(Z[s,], LV[i,]);
    }
  }
}
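/* Editor's note (not part of the generated code): the gp() coefficients
 * above use a low-rank basis approximation of the GP. Each basis
 * coefficient is a standard-normal variate z scaled by the square root of
 * the kernel's spectral density evaluated at the corresponding eigenvalue,
 * a non-centred parameterisation of the approximate GP. */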
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0, 2.5);
  // prior for gp(time):random...
  z_gp_time_byrandom ~ std_normal();
  alpha_gp_time_byrandom ~ student_t(3, 0, 2.5);
  rho_gp_time_byrandom ~ inv_gamma(2.012803, 0.151413);
  b_raw[b_idx_gp_time_byrandom] ~ std_normal();
  // priors for smoothing parameters
  lambda ~ normal(5, 30);
  // priors for latent state SD parameters
  sigma ~ student_t(3, 0, 2.5);
  // dynamic process models
  // prior for gp(time):random_trend...
  z_gp_trend_time_byrandom ~ std_normal();
  alpha_gp_trend_time_byrandom ~ student_t(3, 0, 2.5);
  rho_gp_trend_time_byrandom ~ inv_gamma(2.012803, 0.151413);
  b_raw_trend[b_trend_idx_gp_time_byrandom] ~ std_normal();
  lambda_trend ~ normal(5, 30);
  for (j in 1:n_lv) {
    LV[1, j] ~ normal(trend_mus[ytimes_trend[1, j]], sigma[j]);
    for (i in 2:n) {
      LV[i, j] ~ normal(trend_mus[ytimes_trend[i, j]] + LV[i - 1, j]
                        - trend_mus[ytimes_trend[i - 1, j]], sigma[j]);
    }
  }
  // priors for precision parameters
  phi ~ gamma(0.01, 0.01);
  {
    // likelihood functions
    vector[n_nonmissing] flat_trends;
    vector[n_nonmissing] flat_phis;
    flat_trends = (to_vector(trend))[obs_ind];
    flat_phis = rep_each(phi, n)[obs_ind];
    flat_ys ~ beta(inv_logit(append_col(flat_xs, flat_trends) * append_row(b, 1.0)) .* flat_phis,
                   (1 - inv_logit(append_col(flat_xs, flat_trends) * append_row(b, 1.0))) .* flat_phis);
  }
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] phi_vec;
  matrix[n, n_series] mus;
  vector[n_sp] rho;
  vector[n_sp_trend] rho_trend;
  vector[n_lv] penalty;
  array[n, n_series] real ypred;
  rho = log(lambda);
  rho_trend = log(lambda_trend);
  penalty = 1.0 / (sigma .* sigma);
  matrix[n_series, n_lv] lv_coefs = Z;
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    phi_vec[1:n, s] = rep_vector(phi[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = beta_rng(inv_logit(mus[1:n, s]) .* phi_vec[1:n, s],
                             (1 - inv_logit(mus[1:n, s])) .* phi_vec[1:n, s]);
  }
}

GAM formula: y ~ s(season, k = 5)
Family: gaussian
Link function: identity
Trend model: RW
N series: 3
N timepoints: 30

Status: Fitted using Stan
1 chains, each with iter = 330; warmup = 300; thin = 1
Total post-warmup draws = 30

Observation error parameter estimates:
             2.5%  50% 97.5% Rhat n_eff
sigma_obs[1] 0.38 0.51  0.68 1.02    44
sigma_obs[2] 0.20 0.39  0.68 0.97    31
sigma_obs[3] 0.43 0.64  0.96 1.01    33

GAM coefficient (beta) estimates:
             2.5%   50% 97.5% Rhat n_eff
(Intercept)  0.30  0.55  0.73 1.47     4
s(season).1  0.61  1.10  2.30 1.03    44
s(season).2 -1.40 -0.32  0.33 1.01    40
s(season).3  0.29  1.20  2.50 0.97    44
s(season).4 -1.60 -0.64 -0.14 1.02    44

Approximate significance of GAM smooths:
           edf Ref.df    F p-value    
s(season) 3.21      4 19.1 1.9e-05 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Latent trend variance estimates:
          2.5%   50% 97.5% Rhat n_eff
sigma[1] 0.032 0.049 0.064 0.97    29
sigma[2] 0.140 0.270 0.420 1.00    18
sigma[3] 0.062 0.100 0.160 0.99    16

Stan MCMC diagnostics:
n_eff / iter looks reasonable for all parameters
Rhats above 1.05 found for 110 parameters
 *Diagnose further to investigate why the chains have not mixed
0 of 30 iterations ended with a divergence (0%)
0 of 30 iterations saturated the maximum tree depth of 12 (0%)
E-FMI indicated no pathological behavior

Samples were drawn using NUTS(diag_e) at Mon Apr 22 11:36:14 AM 2024.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split MCMC chains
(at convergence, Rhat = 1)
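For orientation, a call of roughly this shape would produce the model summarised above. This is a sketch, not taken from the test suite: `sim_mvgam()` stands in for whatever simulated data the tests use, and the sampler argument names (`burnin`, `samples`) are assumptions based on the mvgam documentation.

library(mvgam)
# Simulate 3 series over 30 timepoints (illustrative stand-in for the test data)
simdat <- sim_mvgam(T = 30, n_series = 3, family = gaussian())
mod <- mvgam(y ~ s(season, k = 5),
             trend_model = RW(),
             family = gaussian(),
             data = simdat$data_train,
             chains = 1,
             burnin = 300,    # warmup = 300
             samples = 30)    # 30 post-warmup draws, so iter = 330
summary(mod)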
GAM observation formula: y ~ 1
GAM process formula: ~s(season, k = 5)
Family: gaussian
Link function: identity
Trend model: RW(cor = TRUE)
N process models: 3
N series: 3
N timepoints: 30

Status: Fitted using Stan
1 chains, each with iter = 330; warmup = 300; thin = 1
Total post-warmup draws = 30

Observation error parameter estimates:
             2.5%  50% 97.5% Rhat n_eff
sigma_obs[1] 0.41 0.54  0.69 0.99    13
sigma_obs[2] 0.34 0.48  0.62 0.98    24
sigma_obs[3] 0.52 0.66  0.80 1.06    12

GAM observation model coefficient (beta) estimates:
            2.5% 50% 97.5% Rhat n_eff
(Intercept) 0.36 0.5  0.58 1.01    29

Process error parameter estimates:
           2.5%    50% 97.5% Rhat n_eff
sigma[1] 0.0040 0.0067 0.010 1.33     9
sigma[2] 0.0720 0.1300 0.240 1.25     9
sigma[3] 0.0051 0.0097 0.045 0.99     7

GAM process model coefficient (beta) estimates:
                   2.5%   50% 97.5% Rhat n_eff
s(season).1_trend  0.73  1.30  1.70 1.06    13
s(season).2_trend -2.00 -0.86  0.39 0.97    17
s(season).3_trend  0.27  1.80  3.10 0.97    17
s(season).4_trend -1.20 -0.73 -0.11 1.03    18

Approximate significance of GAM process smooths:
           edf Ref.df    F p-value    
s(season) 3.66      4 10.5  <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Stan MCMC diagnostics:
n_eff / iter looks reasonable for all parameters
Rhats above 1.05 found for 46 parameters
 *Diagnose further to investigate why the chains have not mixed
0 of 30 iterations ended with a divergence (0%)
0 of 30 iterations saturated the maximum tree depth of 12 (0%)
E-FMI indicated no pathological behavior

Samples were drawn using NUTS(diag_e) at Mon Apr 22 11:37:18 AM 2024.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split MCMC chains
(at convergence, Rhat = 1)
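This State-Space variant separates the observation model from the process model via a `trend_formula`. Continuing the hedged sketch above (same assumed data and sampler arguments):

mod_ss <- mvgam(y ~ 1,
                trend_formula = ~ s(season, k = 5),
                trend_model = RW(cor = TRUE),
                family = gaussian(),
                data = simdat$data_train,
                chains = 1,
                burnin = 300,
                samples = 30)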
GAM formula: y ~ s(season, k = 5)
Family: gaussian
Link function: identity
Trend model: VAR1cor
N series: 3
N timepoints: 30

Status: Fitted using Stan
1 chains, each with iter = 330; warmup = 300; thin = 1
Total post-warmup draws = 30

Observation error parameter estimates:
              2.5%  50% 97.5% Rhat n_eff
sigma_obs[1] 0.260 0.45  0.61 1.04    19
sigma_obs[2] 0.069 0.14  0.33 1.04     6
sigma_obs[3] 0.140 0.48  0.79 1.15     8

GAM coefficient (beta) estimates:
              2.5%    50% 97.5% Rhat n_eff
(Intercept)  0.340 0.5500  0.86 1.85     4
s(season).1 -0.050 0.0200  0.33 0.97    41
s(season).2 -0.150 0.0082  0.31 1.00    23
s(season).3 -0.400 0.0290  0.70 0.98    28
s(season).4 -0.044 0.1800  0.42 1.01    15

Approximate significance of GAM smooths:
            edf Ref.df    F p-value
s(season) 0.858      4 3.84    0.73

Latent trend VAR parameter estimates:
             2.5%   50% 97.5% Rhat n_eff
A[1,1]     -0.130 0.280  0.63 1.13    44
A[1,2]     -0.220 0.120  0.38 0.97    44
A[1,3]      0.013 0.210  0.56 0.98    31
A[2,1]      0.055 0.540  1.40 0.99    28
A[2,2]     -0.450 0.140  0.58 1.05    32
A[2,3]     -0.220 0.200  0.66 0.98    28
A[3,1]     -0.700 0.130  0.97 0.97    12
A[3,2]     -0.140 0.160  0.93 1.02    10
A[3,3]     -0.280 0.260  0.71 1.32     9
Sigma[1,1]  0.061 0.130  0.43 0.97    16
Sigma[1,2]  0.017 0.093  0.24 0.97    13
Sigma[1,3] -0.071 0.066  0.24 0.97    23
Sigma[2,1]  0.017 0.093  0.24 0.97    13
Sigma[2,2]  0.180 0.320  0.54 1.01    12
Sigma[2,3]  0.028 0.170  0.36 0.97    24
Sigma[3,1] -0.071 0.066  0.24 0.97    23
Sigma[3,2]  0.028 0.170  0.36 0.97    24
Sigma[3,3]  0.190 0.350  0.76 0.99    21

Stan MCMC diagnostics:
n_eff / iter looks reasonable for all parameters
Rhats above 1.05 found for 89 parameters
 *Diagnose further to investigate why the chains have not mixed
0 of 30 iterations ended with a divergence (0%)
0 of 30 iterations saturated the maximum tree depth of 12 (0%)
E-FMI indicated no pathological behavior

Samples were drawn using NUTS(diag_e) at Mon Apr 22 11:39:12 AM 2024.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split MCMC chains
(at convergence, Rhat = 1)

GAM observation formula: y ~ series
GAM process formula: ~s(season, k = 5)
Family: gaussian
Link function: identity
Trend model: VAR(ma = TRUE, cor = TRUE)
N process models: 3
N series: 3
N timepoints: 30

Status: Fitted using Stan
1 chains, each with iter = 330; warmup = 300; thin = 1
Total post-warmup draws = 30

Observation error parameter estimates:
             2.5%  50% 97.5% Rhat n_eff
sigma_obs[1] 0.36 0.45  0.66 1.02    22
sigma_obs[2] 0.22 0.39  0.56 0.97    12
sigma_obs[3] 0.45 0.63  0.81 1.09    15

GAM observation model coefficient (beta) estimates:
                2.5%   50% 97.5% Rhat n_eff
(Intercept)     0.33  0.52 0.670 1.01    33
seriesseries_2 -0.44 -0.13 0.100 0.97    44
seriesseries_3 -0.35 -0.16 0.089 0.98    44

Process model VAR parameter estimates:
        2.5%     50% 97.5% Rhat n_eff
A[1,1] -0.63 -0.0063  0.92 0.99    31
A[1,2] -0.51  0.0170  0.76 1.05    18
A[1,3] -0.68  0.1000  1.60 1.22    17
A[2,1] -1.70  0.4000  3.20 0.99     7
A[2,2] -0.78  0.0510  0.85 1.14    14
A[2,3] -1.70  0.3100  2.20 1.19     8
A[3,1] -0.67  0.1200  1.50 0.97    22
A[3,2] -0.36  0.0400  0.97 0.98    42
A[3,3] -0.77  0.0990  0.88 0.97    32

Process error parameter estimates:
               2.5%     50% 97.5% Rhat n_eff
Sigma[1,1]   0.0016  0.0130 0.083 1.19     7
Sigma[1,2]  -0.0180  0.0029 0.020 0.97    17
Sigma[1,3]  -0.0210 -0.0030 0.016 1.11    18
Sigma[2,1]  -0.0180  0.0029 0.020 0.97    17
Sigma[2,2]   0.0095  0.0670 0.210 1.03     4
Sigma[2,3]  -0.0120  0.0055 0.048 1.01    12
Sigma[3,1]  -0.0210 -0.0030 0.016 1.11    18
Sigma[3,2]  -0.0120  0.0055 0.048 1.01    12
Sigma[3,3]   0.0041  0.0140 0.046 1.45     7
theta[1,1]  -1.0000 -0.0780 0.990 0.98    26
theta[1,2]  -1.5000 -0.0900 0.240 1.03    23
theta[1,3]  -3.4000 -0.0670 1.000 1.04    19
theta[2,1]  -2.4000 -0.2700 1.400 0.98    26
theta[2,2]  -0.8700  0.5400 1.700 0.98    20
theta[2,3]  -3.2000 -0.6900 1.500 0.99    26
theta[3,1]  -2.1000  0.0660 2.400 1.01    39
theta[3,2]  -0.5800  0.0750 1.300 0.97    44
theta[3,3]  -1.9000 -0.2200 1.200 0.98    23

GAM process model coefficient (beta) estimates:
                   2.5%   50% 97.5% Rhat n_eff
s(season).1_trend  0.69  1.40  2.20 0.98    20
s(season).2_trend -1.70 -0.54  0.27 1.24    14
s(season).3_trend  0.33  1.50  2.80 1.26    14
s(season).4_trend -1.50 -0.79 -0.22 1.02    15

Approximate significance of GAM process smooths:
           edf Ref.df    F p-value    
s(season) 2.65      4 13.4 1.5e-06 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Stan MCMC diagnostics:
n_eff / iter looks reasonable for all parameters
Rhats above 1.05 found for 71 parameters
 *Diagnose further to investigate why the chains have not mixed
0 of 30 iterations ended with a divergence (0%)
0 of 30 iterations saturated the maximum tree depth of 12 (0%)
E-FMI indicated no pathological behavior

Samples were drawn using NUTS(diag_e) at Mon Apr 22 11:42:13 AM 2024.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split MCMC chains
(at convergence, Rhat = 1)
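The two summaries above exercise latent vector-autoregressive trends, with and without moving-average errors. Hedged sketches of the corresponding calls, continuing the assumed `simdat` example (`VAR()` and its `ma`/`cor` arguments appear in the summaries themselves; the sampler argument names remain assumptions):

# Latent VAR(1) trend with correlated process errors
mod_var <- mvgam(y ~ s(season, k = 5),
                 trend_model = VAR(cor = TRUE),
                 family = gaussian(),
                 data = simdat$data_train,
                 chains = 1, burnin = 300, samples = 30)

# State-space VARMA(1,1): series intercepts in the observation model,
# a seasonal smooth in the process model
mod_varma <- mvgam(y ~ series,
                   trend_formula = ~ s(season, k = 5),
                   trend_model = VAR(ma = TRUE, cor = TRUE),
                   family = gaussian(),
                   data = simdat$data_train,
                   chains = 1, burnin = 300, samples = 30)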
GAM formula: y ~ series + s(season, k = 5)
Family: gaussian
Link function: identity
Trend model: GP()
N latent factors: 2
N series: 3
N timepoints: 30

Status: Fitted using Stan
1 chains, each with iter = 330; warmup = 300; thin = 1
Total post-warmup draws = 30

Observation error parameter estimates:
             2.5%  50% 97.5% Rhat n_eff
sigma_obs[1] 0.42 0.51  0.64 0.98    44
sigma_obs[2] 0.32 0.43  0.61 0.97    23
sigma_obs[3] 0.47 0.64  0.84 1.00    44

GAM coefficient (beta) estimates:
                 2.5%   50% 97.5% Rhat n_eff
(Intercept)     0.290  0.54  0.96 0.97    24
seriesseries_2 -0.520 -0.16  0.13 1.07    22
seriesseries_3 -0.750 -0.16  0.18 0.98    28
s(season).1    -0.069  0.73  1.90 0.98    13
s(season).2    -1.200 -0.36  0.26 1.03    26
s(season).3     0.068  1.00  2.30 1.03    12
s(season).4    -1.400 -0.33  0.19 0.97    17

Approximate significance of GAM smooths:
           edf Ref.df    F p-value  
s(season) 3.37      4 15.4   0.019 *
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Latent trend length scale (rho) estimates:
          2.5% 50% 97.5% Rhat n_eff
rho_gp[1]  1.4 2.5   9.9 0.97    17
rho_gp[2]  1.6 3.9  15.0 0.97    16

Stan MCMC diagnostics:
n_eff / iter looks reasonable for all parameters
Rhats above 1.05 found for 38 parameters
 *Diagnose further to investigate why the chains have not mixed
2 of 30 iterations ended with a divergence (6.6667%)
 *Try running with larger adapt_delta to remove the divergences
0 of 30 iterations saturated the maximum tree depth of 12 (0%)
E-FMI indicated no pathological behavior

Samples were drawn using NUTS(diag_e) at Mon Apr 22 11:47:50 AM 2024.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split MCMC chains
(at convergence, Rhat = 1)

GAM formula: y ~ 1
Family: poisson
Link function: log
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted

GAM formula: y ~ 1
Family: negative binomial
Link function: log
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted

GAM formula: y ~ 1
Family: lognormal
Link function: identity
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted

GAM formula: y ~ 1
Family: bernoulli
Link function: logit
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted

GAM formula: y ~ 1
Family: student
Link function: identity
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted

GAM formula: y ~ 1
Family: Gamma
Link function: log
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted

GAM formula: y ~ 1
Family: beta
Link function: logit
Trend model: PW()
N series: 3
N timepoints: 75
Status: Not fitted
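The seven "Not fitted" models above only exercise model construction with piecewise (PW) trends across different observation families. Something like the following would produce one of them, assuming mvgam() accepts a run_model = FALSE flag for building a model object without sampling (treat both the flag and the exact family constructors as assumptions):

pw_mod <- mvgam(y ~ 1,
                trend_model = PW(),
                family = poisson(),
                data = simdat$data_train,
                run_model = FALSE)  # assumed flag: construct without sampling
# Analogous calls with the other families listed above would cover the rest.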
// Stan model code generated by package mvgam
functions {
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
  real partial_log_lik(array[] int seq, int start, int end, data vector Y,
                       matrix X, vector b, vector sigma_obs, real alpha) {
    real ptarget = 0;
    ptarget += normal_id_glm_lpdf(Y[start:end] | X[start:end], alpha, b, sigma_obs[start:end]);
    return ptarget;
  }
}
data {
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_sp; // number of smoothing parameters
  int n_series; // number of series
  int num_basis; // total number of basis coefficients
  vector[num_basis] zero; // prior locations for basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  matrix[9,18] S1; // mgcv smooth penalty matrix S1
  int n_nonmissing; // number of nonmissing observations
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
  int grainsize; // grainsize for reduce_sum threading
  array[n_nonmissing] int seq; // an integer sequence for reduce_sum slicing
}
parameters {
  // raw basis coefficients
  vector[num_basis] b_raw;
  // gaussian observation error
  vector[n_series] sigma_obs;
  // latent trend AR1 terms
  vector[n_series] ar1;
  // latent trend AR2 terms
  vector[n_series] ar2;
  // latent trend drift terms
  vector[n_series] drift;
  // latent trend variance parameters
  vector[n_series] sigma;
  // latent trends
  matrix[n, n_series] trend;
  // smoothing parameters
  vector[n_sp] lambda;
}
transformed parameters {
  // basis coefficients
  vector[num_basis] b;
  b[1:num_basis] = b_raw[1:num_basis];
}
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0.1, 2.5);
  // prior for s(season)...
  b_raw[2:10] ~ multi_normal_prec(zero[2:10],
                                  S1[1:9,1:9] * lambda[1] + S1[1:9,10:18] * lambda[2]);
  // priors for AR parameters
  ar1 ~ std_normal();
  ar2 ~ std_normal();
  drift ~ std_normal();
  // priors for smoothing parameters
  lambda ~ normal(5, 30);
  // priors for latent trend variance parameters
  sigma ~ student_t(3, 0, 2.5);
  // trend estimates
  trend[1, 1:n_series] ~ normal(0, sigma);
  trend[2, 1:n_series] ~ normal(drift + trend[1, 1:n_series] * ar1, sigma);
  for (s in 1:n_series) {
    trend[3:n, s] ~ normal(drift[s] + ar1[s] * trend[2:(n - 1), s]
                           + ar2[s] * trend[1:(n - 2), s], sigma[s]);
  }
  // priors for observation error parameters
  sigma_obs ~ student_t(3, 0, 2.5);
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] sigma_obs_vec;
  matrix[n, n_series] mus;
  vector[n_sp] rho;
  vector[n_series] tau;
  array[n, n_series] real ypred;
  rho = log(lambda);
  for (s in 1:n_series) {
    tau[s] = pow(sigma[s], -2.0);
  }
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    sigma_obs_vec[1:n,s] = rep_vector(sigma_obs[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = normal_rng(mus[1:n, s], sigma_obs_vec[1:n, s]);
  }
}

// Stan model code generated by package mvgam
functions {
  /* Function to compute the matrix square root */
  /* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/
  matrix sqrtm(matrix A) {
    int m = rows(A);
    vector[m] root_root_evals = sqrt(sqrt(eigenvalues_sym(A)));
    matrix[m, m] evecs = eigenvectors_sym(A);
    matrix[m, m] eprod = diag_post_multiply(evecs, root_root_evals);
    return tcrossprod(eprod);
  }
  /* Function to transform P_real to P */
  /* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/
  matrix P_realtoP(matrix P_real) {
    int m = rows(P_real);
    matrix[m, m] B = tcrossprod(P_real);
    for (i in 1:m) B[i, i] += 1.0;
    return mdivide_left_spd(sqrtm(B), P_real);
  }
  /* Function to compute Kronecker product */
  /* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/
  matrix kronecker_prod(matrix A, matrix B) {
    matrix[rows(A) * rows(B), cols(A) * cols(B)] C;
    int m = rows(A);
    int n = cols(A);
    int p = rows(B);
    int q = cols(B);
    for (i in 1:m) {
      for (j in 1:n) {
        int row_start = (i - 1) * p + 1;
        int row_end = (i - 1) * p + p;
        int col_start = (j - 1) * q + 1;
        int col_end = (j - 1) * q + q;
        C[row_start:row_end, col_start:col_end] = A[i, j] * B;
      }
    }
    return C;
  }
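  /* Editor's note (not part of the generated code): sqrtm() returns the
   * symmetric matrix square root via an eigendecomposition
   * (V diag(sqrt(lambda)) V'), P_realtoP() maps an unconstrained matrix to
   * a valid partial autocorrelation matrix, and kronecker_prod() reproduces
   * the standard Kronecker product (base::kronecker(), or %x%, in R). */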
  /* Function to perform the reverse mapping */
  /* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/
  array[] matrix rev_mapping(array[] matrix P, matrix Sigma) {
    int p = size(P);
    int m = rows(Sigma);
    array[p, p] matrix[m, m] phi_for;
    array[p, p] matrix[m, m] phi_rev;
    array[p+1] matrix[m, m] Sigma_for;
    array[p+1] matrix[m, m] Sigma_rev;
    matrix[m, m] S_for;
    matrix[m, m] S_rev;
    array[p+1] matrix[m, m] S_for_list;
    // Step 1:
    Sigma_for[p+1] = Sigma;
    S_for_list[p+1] = sqrtm(Sigma);
    for (s in 1:p) {
      // In this block of code S_rev is B^{-1} and S_for is a working matrix
      S_for = - tcrossprod(P[p-s+1]);
      for (i in 1:m) S_for[i, i] += 1.0;
      S_rev = sqrtm(S_for);
      S_for_list[p-s+1] = mdivide_right_spd(
        mdivide_left_spd(S_rev, sqrtm(quad_form_sym(Sigma_for[p-s+2], S_rev))), S_rev);
      Sigma_for[p-s+1] = tcrossprod(S_for_list[p-s+1]);
    }
    // Step 2:
    Sigma_rev[1] = Sigma_for[1];
    for (s in 0:(p-1)) {
      S_for = S_for_list[s+1];
      S_rev = sqrtm(Sigma_rev[s+1]);
      phi_for[s+1, s+1] = mdivide_right_spd(S_for * P[s+1], S_rev);
      phi_rev[s+1, s+1] = mdivide_right_spd(S_rev * P[s+1]', S_for);
      if (s >= 1) {
        for (k in 1:s) {
          phi_for[s+1, k] = phi_for[s, k] - phi_for[s+1, s+1] * phi_rev[s, s-k+1];
          phi_rev[s+1, k] = phi_rev[s, k] - phi_rev[s+1, s+1] * phi_for[s, s-k+1];
        }
      }
      Sigma_rev[s+2] = Sigma_rev[s+1] - quad_form_sym(Sigma_for[s+1], phi_rev[s+1, s+1]');
    }
    return phi_for[p];
  }
  /* Function to compute the joint (stationary) distribution of
     (y_0, ..., y_{1-p}, eps_0, ..., eps_{1-q}) */
  /* see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)*/
  matrix initial_joint_var(matrix Sigma, array[] matrix phi, array[] matrix theta) {
    int p = size(phi);
    int q = size(theta);
    int m = rows(Sigma);
    matrix[(p+q)*m, (p+q)*m] companion_mat = rep_matrix(0.0, (p+q)*m, (p+q)*m);
    matrix[(p+q)*m, (p+q)*m] companion_var = rep_matrix(0.0, (p+q)*m, (p+q)*m);
    matrix[(p+q)*m*(p+q)*m, (p+q)*m*(p+q)*m] tmp = diag_matrix(rep_vector(1.0, (p+q)*m*(p+q)*m));
    matrix[(p+q)*m, (p+q)*m] Omega;
    // Construct phi_tilde:
    for (i in 1:p) {
      companion_mat[1:m, ((i-1)*m+1):(i*m)] = phi[i];
      if (i > 1) {
        for (j in 1:m) {
          companion_mat[(i-1)*m+j, (i-2)*m+j] = 1.0;
        }
      }
    }
    for (i in 1:q) {
      companion_mat[1:m, ((p+i-1)*m+1):((p+i)*m)] = theta[i];
    }
    if (q > 1) {
      for (i in 2:q) {
        for (j in 1:m) {
          companion_mat[(p+i-1)*m+j, (p+i-2)*m+j] = 1.0;
        }
      }
    }
    // Construct Sigma_tilde:
    companion_var[1:m, 1:m] = Sigma;
    companion_var[(p*m+1):((p+1)*m), (p*m+1):((p+1)*m)] = Sigma;
    companion_var[1:m, (p*m+1):((p+1)*m)] = Sigma;
    companion_var[(p*m+1):((p+1)*m), 1:m] = Sigma;
    // Compute Gamma0_tilde
    tmp -= kronecker_prod(companion_mat, companion_mat);
    Omega = to_matrix(tmp \ to_vector(companion_var), (p+q)*m, (p+q)*m);
    // Ensure Omega is symmetric:
    for (i in 1:(rows(Omega)-1)) {
      for (j in (i+1):rows(Omega)) {
        Omega[j, i] = Omega[i, j];
      }
    }
    return Omega;
  }
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
  real partial_log_lik(array[] int seq, int start, int end, data vector Y,
                       matrix X, vector b, vector sigma_obs, real alpha) {
    real ptarget = 0;
    ptarget += normal_id_glm_lpdf(Y[start:end] | X[start:end], alpha, b, sigma_obs[start:end]);
    return ptarget;
  }
}
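/* Editor's note (not part of the generated code): initial_joint_var() solves
 * the stationarity equation Omega = Phi_tilde Omega Phi_tilde' + Sigma_tilde
 * for the VARMA companion form, using the vectorisation identity
 * vec(Omega) = (I - Phi_tilde (x) Phi_tilde)^{-1} vec(Sigma_tilde); an R
 * sketch of the same identity follows the end of this program. */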
data {
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_sp_trend; // number of trend smoothing parameters
  int n_lv; // number of dynamic factors
  int n_series; // number of series
  matrix[n_series, n_lv] Z; // matrix mapping series to latent states
  int num_basis; // total number of basis coefficients
  int num_basis_trend; // number of trend basis coefficients
  vector[num_basis_trend] zero_trend; // prior locations for trend basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  matrix[n * n_lv, num_basis_trend] X_trend; // trend model design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  array[n, n_lv] int ytimes_trend;
  int n_nonmissing; // number of nonmissing observations
  matrix[9,18] S_trend1; // mgcv smooth penalty matrix S_trend1
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
  array[3] int trend_rand_idxs; // trend random effect indices
  int grainsize; // grainsize for reduce_sum threading
  array[n_nonmissing] int seq; // an integer sequence for reduce_sum slicing
}
transformed data {
  vector[n_lv] trend_zeros = rep_vector(0.0, n_lv);
  vector[n_lv*2] init_zeros = rep_vector(0.0, n_lv*2);
  // exchangeable partial autocorrelation hyperparameters
  // see Heaps 2022 for details (https://doi.org/10.1080/10618600.2022.2079648)
  vector[2] es;
  vector[2] fs;
  vector[2] gs;
  vector[2] hs;
  es[1] = 0;
  es[2] = 0;
  fs[1] = sqrt(0.455);
  fs[2] = sqrt(0.455);
  gs[1] = 1.365;
  gs[2] = 1.365;
  hs[1] = 0.071175;
  hs[2] = 0.071175;
}
parameters {
  // raw basis coefficients
  vector[num_basis] b_raw;
  vector[num_basis_trend] b_raw_trend;
  // trend random effects
  vector[1] sigma_raw_trend;
  vector[1] mu_raw_trend;
  // gaussian observation error
  vector[n_series] sigma_obs;
  // latent state variance parameters
  cholesky_factor_corr[n_lv] L_Omega;
  vector[n_lv] sigma;
  // unconstrained VAR1 partial autocorrelations
  matrix[n_lv, n_lv] P_real;
  // unconstrained MA partial autocorrelations
  matrix[n_lv, n_lv] R_real;
  // initial joint stationary VARMA process
  vector[2 * n_lv] init;
  // ma error parameters
  array[n] vector[n_lv] error;
  // partial autocorrelation hyperparameters
  vector[2] Pmu;
  vector[2] Pomega;
  // smoothing parameters
  vector[n_sp_trend] lambda_trend;
}
transformed parameters {
  // latent state VAR1 autoregressive terms
  matrix[n_lv, n_lv] A;
  // latent trend MA autoregressive terms
  matrix[n_lv, n_lv] theta;
  // ma process
  array[n] vector[n_lv] epsilon;
  // LKJ form of covariance matrix
  matrix[n_lv, n_lv] L_Sigma;
  // computed error covariance matrix
  cov_matrix[n_lv] Sigma;
  // initial trend covariance
  cov_matrix[n_lv * 2] Omega;
  // latent states
  array[n] vector[n_lv] LV;
  // basis coefficients
  vector[num_basis] b;
  vector[num_basis_trend] b_trend;
  // latent states and loading matrix
  vector[n * n_lv] trend_mus;
  matrix[n, n_series] trend;
  matrix[n_series, n_lv] lv_coefs;
  // process model basis coefficients
  b_trend[1:9] = b_raw_trend[1:9];
  b_trend[10:12] = mu_raw_trend[1] + b_raw_trend[10:12] * sigma_raw_trend[1];
  // latent process linear predictors
  trend_mus = X_trend * b_trend;
  // stationary VARMA reparameterisation
  L_Sigma = diag_pre_multiply(sigma, L_Omega);
  Sigma = multiply_lower_tri_self_transpose(L_Sigma);
  {
    // constrained partial autocorrelations
    array[1] matrix[n_lv, n_lv] P;
    array[1] matrix[n_lv, n_lv] R;
    // stationary autoregressive coefficients
    array[1] matrix[n_lv, n_lv] A_init;
    array[1] matrix[n_lv, n_lv] theta_init;
    P[1] = P_realtoP(P_real);
    R[1] = P_realtoP(R_real);
    // stationary autoregressive and ma coef matrices
    A_init = rev_mapping(P, Sigma);
    theta_init = rev_mapping(R, Sigma);
    theta_init[1] = -theta_init[1];
    // initial stationary covariance structure
    Omega = initial_joint_var(Sigma, A_init, theta_init);
    A = A_init[1];
    theta = theta_init[1];
  }
  // computed VARMA trends
  epsilon[1] = theta * init[(n_lv + 1) : (n_lv * 2)];
  LV[1] = (A * init[1 : n_lv]) + trend_mus[ytimes_trend[1, 1 : n_lv]]
          + epsilon[1] + error[1];
  for (i in 2 : n) {
    // lagged error ma process
    epsilon[i] = theta * error[i - 1];
    // full VARMA process
    LV[i] = trend_mus[ytimes_trend[i, 1 : n_lv]]
            + A * (LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1 : n_lv]])
            + epsilon[i] + error[i];
  }
  // derived latent states
  lv_coefs = Z;
  for (i in 1 : n) {
    for (s in 1 : n_series) {
      trend[i, s] = dot_product(lv_coefs[s, : ], LV[i]);
    }
  }
  // observation model basis coefficients
  b[1:num_basis] = b_raw[1:num_basis];
}
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0.1, 2.5);
  // priors for latent state variance parameters
  sigma ~ student_t(3, 0, 2.5);
  // LKJ error correlation prior
  L_Omega ~ lkj_corr_cholesky(2);
  // partial autocorrelation hyperpriors
  Pmu ~ normal(es, fs);
  Pomega ~ gamma(gs, hs);
  // unconstrained partial autocorrelations
  diagonal(P_real) ~ normal(Pmu[1], 1 / sqrt(Pomega[1]));
  for (i in 1:n_lv) {
    for (j in 1:n_lv) {
      if (i != j) P_real[i, j] ~ normal(Pmu[2], 1 / sqrt(Pomega[2]));
    }
  }
  // unconstrained ma inverse partial autocorrelations
  diagonal(R_real) ~ std_normal();
  for (i in 1 : n_lv) {
    for (j in 1 : n_lv) {
      if (i != j) R_real[i, j] ~ std_normal();
    }
  }
  // initial joint stationary distribution
  init ~ multi_normal(init_zeros, Omega);
  // correlated contemporaneous errors
  for (i in 1 : n) {
    error[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);
  }
  // dynamic process models
  // prior for s(season)_trend...
  b_raw_trend[1:9] ~ multi_normal_prec(zero_trend[1:9],
                                       S_trend1[1:9,1:9] * lambda_trend[1]
                                       + S_trend1[1:9,10:18] * lambda_trend[2]);
  lambda_trend ~ normal(5, 30);
  sigma_raw_trend ~ student_t(3, 0, 2.5);
  mu_raw_trend ~ std_normal();
  b_raw_trend[trend_rand_idxs] ~ std_normal();
  // priors for observation error parameters
  sigma_obs ~ student_t(3, 0, 2.5);
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] sigma_obs_vec;
  matrix[n, n_series] mus;
  vector[n_sp_trend] rho_trend;
  array[n, n_series] real ypred;
  rho_trend = log(lambda_trend);
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    sigma_obs_vec[1:n,s] = rep_vector(sigma_obs[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = normal_rng(mus[1:n, s], sigma_obs_vec[1:n, s]);
  }
}
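A minimal R sketch of the two identities at the heart of the VARMA program above (all names here are illustrative; this is not mvgam code). The first function solves the VAR(1) special case of the stationarity equation computed by initial_joint_var(); the second simulates the VARMA(1,1) recursion from the transformed parameters block, with the exact initialisation simplified:

# Stationary covariance of a VAR(1): solve Omega = Phi Omega Phi' + Sigma
# via vec(Omega) = (I - Phi %x% Phi)^{-1} vec(Sigma)
stationary_cov <- function(Phi, Sigma) {
  m <- nrow(Phi)
  matrix(solve(diag(m^2) - Phi %x% Phi, as.vector(Sigma)), m, m)
}

# The VARMA(1,1) recursion:
# LV[t] = mu[t] + A (LV[t-1] - mu[t-1]) + theta e[t-1] + e[t]
simulate_varma11 <- function(mu, A, theta, Sigma) {
  n <- nrow(mu); m <- ncol(mu)
  e <- MASS::mvrnorm(n, rep(0, m), Sigma)  # correlated contemporaneous errors
  LV <- matrix(0, n, m)
  LV[1, ] <- mu[1, ] + e[1, ]              # simplified initial condition
  for (t in 2:n) {
    LV[t, ] <- mu[t, ] + A %*% (LV[t - 1, ] - mu[t - 1, ]) +
      theta %*% e[t - 1, ] + e[t, ]
  }
  LV
}

A <- matrix(c(0.5, 0.1, 0, 0.3), 2, 2)
Sigma <- diag(2) * 0.1
stationary_cov(A, Sigma)
lv <- simulate_varma11(mu = matrix(0, 50, 2), A = A,
                       theta = diag(2) * 0.2, Sigma = Sigma)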
// Stan model code generated by package mvgam
functions {
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
  real partial_log_lik(array[] int seq, int start, int end, data vector Y,
                       matrix X, vector b, vector sigma_obs, real alpha) {
    real ptarget = 0;
    ptarget += normal_id_glm_lpdf(Y[start:end] | X[start:end], alpha, b, sigma_obs[start:end]);
    return ptarget;
  }
}
data {
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_sp_trend; // number of trend smoothing parameters
  int n_lv; // number of dynamic factors
  int n_series; // number of series
  matrix[n_series, n_lv] Z; // matrix mapping series to latent states
  int num_basis; // total number of basis coefficients
  int num_basis_trend; // number of trend basis coefficients
  vector[num_basis_trend] zero_trend; // prior locations for trend basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  matrix[n * n_lv, num_basis_trend] X_trend; // trend model design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  array[n, n_lv] int ytimes_trend;
  array[n, n_series] real time_dis; // temporal distances between observations
  int n_nonmissing; // number of nonmissing observations
  matrix[9,18] S_trend1; // mgcv smooth penalty matrix S_trend1
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
  array[3] int trend_rand_idxs; // trend random effect indices
  int grainsize; // grainsize for reduce_sum threading
  array[n_nonmissing] int seq; // an integer sequence for reduce_sum slicing
}
transformed data {
}
parameters {
  // raw basis coefficients
  vector[num_basis] b_raw;
  vector[num_basis_trend] b_raw_trend;
  // trend random effects
  vector[1] sigma_raw_trend;
  vector[1] mu_raw_trend;
  // latent state SD terms
  vector[n_lv] sigma;
  // gaussian observation error
  vector[n_series] sigma_obs;
  // latent state AR1 terms
  vector[n_lv] ar1;
  // latent states
  matrix[n, n_lv] LV;
  // smoothing parameters
  vector[n_sp_trend] lambda_trend;
}
transformed parameters {
  // latent states and loading matrix
  vector[n * n_lv] trend_mus;
  matrix[n, n_series] trend;
  // basis coefficients
  vector[num_basis] b;
  vector[num_basis_trend] b_trend;
  // observation model basis coefficients
  b[1:num_basis] = b_raw[1:num_basis];
  // process model basis coefficients
  b_trend[1:9] = b_raw_trend[1:9];
  b_trend[10:12] = mu_raw_trend[1] + b_raw_trend[10:12] * sigma_raw_trend[1];
  // latent process linear predictors
  trend_mus = X_trend * b_trend;
  // derived latent states
  for (i in 1:n) {
    for (s in 1:n_series) {
      trend[i, s] = dot_product(Z[s,], LV[i,]);
    }
  }
}
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0.1, 2.5);
  // priors for AR parameters
  ar1 ~ std_normal();
  // priors for latent state SD parameters
  sigma ~ student_t(3, 0, 2.5);
  // dynamic process models
  // prior for s(season)_trend...
  b_raw_trend[1:9] ~ multi_normal_prec(zero_trend[1:9],
                                       S_trend1[1:9,1:9] * lambda_trend[1]
                                       + S_trend1[1:9,10:18] * lambda_trend[2]);
  lambda_trend ~ normal(5, 30);
  sigma_raw_trend ~ student_t(3, 0, 2.5);
  mu_raw_trend ~ std_normal();
  b_raw_trend[trend_rand_idxs] ~ std_normal();
  for (j in 1:n_lv) {
    LV[1, j] ~ normal(trend_mus[ytimes_trend[1, j]], sigma[j]);
    for (i in 2:n) {
      LV[i, j] ~ normal(trend_mus[ytimes_trend[i, j]]
                        + pow(ar1[j], time_dis[i, j])
                          * (LV[i - 1, j] - trend_mus[ytimes_trend[i - 1, j]]),
                        sigma[j]);
    }
  }
  // priors for observation error parameters
  sigma_obs ~ student_t(3, 0, 2.5);
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] sigma_obs_vec;
  matrix[n, n_series] mus;
  vector[n_sp_trend] rho_trend;
  vector[n_lv] penalty;
  array[n, n_series] real ypred;
  penalty = 1.0 / (sigma .* sigma);
  rho_trend = log(lambda_trend);
  matrix[n_series, n_lv] lv_coefs = Z;
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    sigma_obs_vec[1:n,s] = rep_vector(sigma_obs[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = normal_rng(mus[1:n, s], sigma_obs_vec[1:n, s]);
  }
}
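The pow(ar1, time_dis) term in the model block above handles irregularly sampled series: the AR(1) autocorrelation decays as ar1^dt with the time gap dt between observations. A toy R version mirroring exactly that recursion (names illustrative, not mvgam code):

# AR(1) over irregular time gaps: correlation decays as ar1^dt
simulate_car1 <- function(times, mu, ar1, sigma) {
  n <- length(times)
  lv <- numeric(n)
  lv[1] <- rnorm(1, mu[1], sigma)
  for (i in 2:n) {
    dt <- times[i] - times[i - 1]
    lv[i] <- rnorm(1, mu[i] + ar1^dt * (lv[i - 1] - mu[i - 1]), sigma)
  }
  lv
}
simulate_car1(times = c(1, 2, 4, 7, 8), mu = rep(0, 5), ar1 = 0.7, sigma = 0.1)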
// Stan model code generated by package mvgam
functions {
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
}
data {
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_sp; // number of smoothing parameters
  int n_series; // number of series
  int num_basis; // total number of basis coefficients
  vector[num_basis] zero; // prior locations for basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  matrix[9,18] S1; // mgcv smooth penalty matrix S1
  int n_nonmissing; // number of nonmissing observations
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
}
parameters {
  // raw basis coefficients
  vector[num_basis] b_raw;
  // Beta precision parameters
  vector[n_series] phi;
  // latent trend AR1 terms
  vector[n_series] ar1;
  // latent trend AR2 terms
  vector[n_series] ar2;
  // latent trend AR3 terms
  vector[n_series] ar3;
  // latent trend drift terms
  vector[n_series] drift;
  // latent trend variance parameters
  vector[n_series] sigma;
  // latent trends
  matrix[n, n_series] trend;
  // smoothing parameters
  vector[n_sp] lambda;
}
transformed parameters {
  // basis coefficients
  vector[num_basis] b;
  b[1:num_basis] = b_raw[1:num_basis];
}
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0, 2.5);
  // prior for s(season)...
  b_raw[2:10] ~ multi_normal_prec(zero[2:10],
                                  S1[1:9,1:9] * lambda[1] + S1[1:9,10:18] * lambda[2]);
  // priors for AR parameters
  ar1 ~ std_normal();
  ar2 ~ std_normal();
  ar3 ~ std_normal();
  drift ~ std_normal();
  // priors for smoothing parameters
  lambda ~ normal(5, 30);
  // priors for latent trend variance parameters
  sigma ~ student_t(3, 0, 2.5);
  // trend estimates
  trend[1, 1:n_series] ~ normal(0, sigma);
  trend[2, 1:n_series] ~ normal(drift + trend[1, 1:n_series] * ar1, sigma);
  trend[3, 1:n_series] ~ normal(drift + trend[2, 1:n_series] * ar1
                                + trend[1, 1:n_series] * ar2, sigma);
  for (s in 1:n_series) {
    trend[4:n, s] ~ normal(drift[s] + ar1[s] * trend[3:(n - 1), s]
                           + ar2[s] * trend[2:(n - 2), s]
                           + ar3[s] * trend[1:(n - 3), s], sigma[s]);
  }
  // priors for precision parameters
  phi ~ gamma(0.01, 0.01);
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] phi_vec;
  matrix[n, n_series] mus;
  vector[n_sp] rho;
  vector[n_series] tau;
  array[n, n_series] real ypred;
  rho = log(lambda);
  for (s in 1:n_series) {
    tau[s] = pow(sigma[s], -2.0);
  }
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    phi_vec[1:n,s] = rep_vector(phi[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = beta_rng(inv_logit(mus[1:n, s]) .* phi_vec[1:n, s],
                             (1 - inv_logit(mus[1:n, s])) .* phi_vec[1:n, s]);
  }
}
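A note on the Beta models in these listings: the likelihood uses the mean-precision parameterisation, with mean mu = inv_logit(eta) and precision phi, so shape1 = mu * phi and shape2 = (1 - mu) * phi. The equivalent draw in base R (values illustrative):

eta <- 0.5           # linear predictor on the logit scale
mu  <- plogis(eta)   # mean via the inverse-logit link
phi <- 10            # precision
rbeta(5, shape1 = mu * phi, shape2 = (1 - mu) * phi)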
// Stan model code generated by package mvgam
functions {
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
}
data {
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_sp_trend; // number of trend smoothing parameters
  int n_lv; // number of dynamic factors
  int n_series; // number of series
  matrix[n_series, n_lv] Z; // matrix mapping series to latent states
  int num_basis; // total number of basis coefficients
  int num_basis_trend; // number of trend basis coefficients
  vector[num_basis_trend] zero_trend; // prior locations for trend basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  matrix[n * n_lv, num_basis_trend] X_trend; // trend model design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  array[n, n_lv] int ytimes_trend;
  int n_nonmissing; // number of nonmissing observations
  matrix[9,18] S_trend1; // mgcv smooth penalty matrix S_trend1
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
  array[3] int trend_rand_idxs; // trend random effect indices
}
transformed data {
  vector[n_lv] trend_zeros = rep_vector(0.0, n_lv);
}
parameters {
  // raw basis coefficients
  vector[num_basis] b_raw;
  vector[num_basis_trend] b_raw_trend;
  // trend random effects
  vector[1] sigma_raw_trend;
  vector[1] mu_raw_trend;
  // latent state SD terms
  vector[n_lv] sigma;
  cholesky_factor_corr[n_lv] L_Omega;
  // Beta precision parameters
  vector[n_series] phi;
  // latent state AR1 terms
  vector[n_lv] ar1;
  // ma coefficients
  matrix[n_lv, n_lv] theta;
  // dynamic error parameters
  array[n] vector[n_lv] error;
  // smoothing parameters
  vector[n_sp_trend] lambda_trend;
}
transformed parameters {
  // latent states and loading matrix
  vector[n * n_lv] trend_mus;
  matrix[n, n_series] trend;
  array[n] vector[n_lv] LV;
  array[n] vector[n_lv] epsilon;
  // LKJ form of covariance matrix
  matrix[n_lv, n_lv] L_Sigma;
  // computed error covariance matrix
  cov_matrix[n_lv] Sigma;
  matrix[n_series, n_lv] lv_coefs;
  // basis coefficients
  vector[num_basis] b;
  vector[num_basis_trend] b_trend;
  // observation model basis coefficients
  b[1:num_basis] = b_raw[1:num_basis];
  // process model basis coefficients
  b_trend[1:9] = b_raw_trend[1:9];
  b_trend[10:12] = mu_raw_trend[1] + b_raw_trend[10:12] * sigma_raw_trend[1];
  // latent process linear predictors
  trend_mus = X_trend * b_trend;
  // derived latent states
  LV[1] = trend_mus[ytimes_trend[1, 1:n_lv]] + error[1];
  epsilon[1] = error[1];
  for (i in 2:n) {
    // lagged error ma process
    epsilon[i] = theta * error[i - 1];
    // full ARMA process
    LV[i] = trend_mus[ytimes_trend[i, 1:n_lv]]
            + ar1 .* (LV[i - 1] - trend_mus[ytimes_trend[i - 1, 1:n_lv]])
            + epsilon[i] + error[i];
  }
  L_Sigma = diag_pre_multiply(sigma, L_Omega);
  Sigma = multiply_lower_tri_self_transpose(L_Sigma);
  lv_coefs = Z;
  for (i in 1:n) {
    for (s in 1:n_series) {
      trend[i, s] = dot_product(lv_coefs[s,], LV[i,]);
    }
  }
}
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0, 2.5);
  // priors for AR parameters
  ar1 ~ std_normal();
  // priors for latent state SD parameters
  sigma ~ student_t(3, 0, 2.5);
  // dynamic process models
  // prior for s(season)_trend...
  b_raw_trend[1:9] ~ multi_normal_prec(zero_trend[1:9],
                                       S_trend1[1:9,1:9] * lambda_trend[1]
                                       + S_trend1[1:9,10:18] * lambda_trend[2]);
  lambda_trend ~ normal(5, 30);
  sigma_raw_trend ~ student_t(3, 0, 2.5);
  mu_raw_trend ~ std_normal();
  b_raw_trend[trend_rand_idxs] ~ std_normal();
  // contemporaneous errors
  L_Omega ~ lkj_corr_cholesky(2);
  for (i in 1:n) {
    error[i] ~ multi_normal_cholesky(trend_zeros, L_Sigma);
  }
  // ma coefficients
  for (i in 1:n_lv) {
    for (j in 1:n_lv) {
      if (i != j) theta[i, j] ~ normal(0, 0.2);
    }
  }
  // priors for precision parameters
  phi ~ gamma(0.01, 0.01);
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] phi_vec;
  matrix[n, n_series] mus;
  vector[n_sp_trend] rho_trend;
  vector[n_lv] penalty;
  array[n, n_series] real ypred;
  penalty = 1.0 / (sigma .* sigma);
  rho_trend = log(lambda_trend);
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    phi_vec[1:n,s] = rep_vector(phi[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = beta_rng(inv_logit(mus[1:n, s]) .* phi_vec[1:n, s],
                             (1 - inv_logit(mus[1:n, s])) .* phi_vec[1:n, s]);
  }
}

[ FAIL 0 | WARN 0 | SKIP 9 | PASS 515 ]

══ Skipped tests (9) ═══════════════════════════════════════════════════════════
• On CRAN (9): 'test-binomial.R:80:3', 'test-binomial.R:274:3',
  'test-example_processing.R:288:3', 'test-example_processing.R:327:3',
  'test-example_processing.R:342:3', 'test-example_processing.R:457:3',
  'test-mvgam-methods.R:10:3', 'test-mvgam-methods.R:283:3',
  'test-nmixture.R:98:3'

[ FAIL 0 | WARN 0 | SKIP 9 | PASS 515 ]
> 
> proc.time()
   user  system elapsed 
1778.89    4.35 1783.26 