R Under development (unstable) (2024-04-30 r86503 ucrt) -- "Unsuffered Consequences"
Copyright (C) 2024 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> library(testthat)
> library(mvgam)
Loading required package: mgcv
Loading required package: nlme
This is mgcv 1.9-1. For overview type 'help("mgcv-package")'.
Loading required package: Rcpp
Loading required package: brms
Loading 'brms' package (version 2.21.0). Useful instructions
can be found by typing help('brms'). A more detailed introduction
to the package is available through vignette('brms_overview').

Attaching package: 'brms'

The following objects are masked from 'package:mgcv':

    s, t2

The following object is masked from 'package:stats':

    ar

Loading required package: marginaleffects
Loading required package: insight
Welcome to mvgam. Please cite as:
Clark, NJ, and Wells, K. 2022. Dynamic Generalized Additive Models (DGAMs)
for forecasting discrete ecological time series. Methods in Ecology and
Evolution, 2022, https://doi.org/10.1111/2041-210X.13974

> 
> test_check("mvgam")
// Stan model code generated by package mvgam
functions {
  /* Spectral density function of a Gaussian process
   * with squared exponential covariance kernel
   * Args:
   *   l_gp: numeric eigenvalues of an SPD GP
   *   alpha_gp: marginal SD parameter
   *   rho_gp: length-scale parameter
   * Returns:
   *   numeric values of the GP function evaluated at l_gp
   */
  vector spd_cov_exp_quad(data vector l_gp, real alpha_gp, real rho_gp) {
    int NB = size(l_gp);
    vector[NB] out;
    real constant = square(alpha_gp) * (sqrt(2 * pi()) * rho_gp);
    real neg_half_lscale2 = -0.5 * square(rho_gp);
    for (m in 1:NB) {
      out[m] = constant * exp(neg_half_lscale2 * square(l_gp[m]));
    }
    return out;
  }
  vector rep_each(vector x, int K) {
    int N = rows(x);
    vector[N * K] y;
    int pos = 1;
    for (n in 1:N) {
      for (k in 1:K) {
        y[pos] = x[n];
        pos += 1;
      }
    }
    return y;
  }
}
data {
  int k_gp_trend_time_byrandom; // basis functions for approximate gp
  vector[k_gp_trend_time_byrandom] l_gp_trend_time_byrandom; // approximate gp eigenvalues
  array[23] int b_trend_idx_gp_time_byrandom; // gp basis coefficient indices
  int total_obs; // total number of observations
  int n; // number of timepoints per series
  int n_lv; // number of dynamic factors
  int n_sp; // number of smoothing parameters
  int n_sp_trend; // number of trend smoothing parameters
  int n_series; // number of series
  matrix[n_series, n_lv] Z; // matrix mapping series to latent states
  int num_basis; // total number of basis coefficients
  int num_basis_trend; // number of trend basis coefficients
  vector[num_basis_trend] zero_trend; // prior locations for trend basis coefficients
  vector[num_basis] zero; // prior locations for basis coefficients
  matrix[total_obs, num_basis] X; // mgcv GAM design matrix
  matrix[n * n_lv, num_basis_trend] X_trend; // trend model design matrix
  array[n, n_series] int ytimes; // time-ordered matrix (which col in X belongs to each [time, series] observation?)
  int k_gp_time_byrandom; // basis functions for approximate gp
  vector[k_gp_time_byrandom] l_gp_time_byrandom; // approximate gp eigenvalues
  array[41] int b_idx_gp_time_byrandom; // gp basis coefficient indices
  array[n, n_lv] int ytimes_trend;
  int n_nonmissing; // number of nonmissing observations
  vector[n_nonmissing] flat_ys; // flattened nonmissing observations
  matrix[n_nonmissing, num_basis] flat_xs; // X values for nonmissing observations
  array[n_nonmissing] int obs_ind; // indices of nonmissing observations
}
transformed data {
}
parameters {
  // gp term sd parameters
  real alpha_gp_trend_time_byrandom;
  // gp term length scale parameters
  real rho_gp_trend_time_byrandom;
  // gp term latent variables
  vector[k_gp_trend_time_byrandom] z_gp_trend_time_byrandom;
  // raw basis coefficients
  vector[num_basis] b_raw;
  // gp term sd parameters
  real alpha_gp_time_byrandom;
  // gp term length scale parameters
  real rho_gp_time_byrandom;
  // gp term latent variables
  vector[k_gp_time_byrandom] z_gp_time_byrandom;
  vector[num_basis_trend] b_raw_trend;
  // latent state SD terms
  vector[n_lv] sigma;
  // Beta precision parameters
  vector[n_series] phi;
  // latent states
  matrix[n, n_lv] LV;
  // smoothing parameters
  vector[n_sp] lambda;
  vector[n_sp_trend] lambda_trend;
}
transformed parameters {
  // latent states and loading matrix
  vector[n * n_lv] trend_mus;
  matrix[n, n_series] trend;
  // basis coefficients
  vector[num_basis] b;
  vector[num_basis_trend] b_trend;
  // observation model basis coefficients
  b[1:num_basis] = b_raw[1:num_basis];
  b[b_idx_gp_time_byrandom] = sqrt(spd_cov_exp_quad(l_gp_time_byrandom,
                                                    alpha_gp_time_byrandom,
                                                    rho_gp_time_byrandom))
                              .* z_gp_time_byrandom;
  // process model basis coefficients
  b_trend[1:num_basis_trend] = b_raw_trend[1:num_basis_trend];
  b_trend[b_trend_idx_gp_time_byrandom] = sqrt(spd_cov_exp_quad(l_gp_trend_time_byrandom,
                                                                alpha_gp_trend_time_byrandom,
                                                                rho_gp_trend_time_byrandom))
                                          .* z_gp_trend_time_byrandom;
  // latent process linear predictors
  trend_mus = X_trend * b_trend;
  // derived latent states
  for (i in 1:n) {
    for (s in 1:n_series) {
      trend[i, s] = dot_product(Z[s, ], LV[i, ]);
    }
  }
}
model {
  // prior for (Intercept)...
  b_raw[1] ~ student_t(3, 0, 2.5);
  // prior for gp(time):random...
  z_gp_time_byrandom ~ std_normal();
  alpha_gp_time_byrandom ~ student_t(3, 0, 2.5);
  rho_gp_time_byrandom ~ inv_gamma(2.012803, 0.151413);
  b_raw[b_idx_gp_time_byrandom] ~ std_normal();
  // priors for smoothing parameters
  lambda ~ normal(5, 30);
  // priors for latent state SD parameters
  sigma ~ student_t(3, 0, 2.5);
  // dynamic process models
  // prior for gp(time):random_trend...
  z_gp_trend_time_byrandom ~ std_normal();
  alpha_gp_trend_time_byrandom ~ student_t(3, 0, 2.5);
  rho_gp_trend_time_byrandom ~ inv_gamma(2.012803, 0.151413);
  b_raw_trend[b_trend_idx_gp_time_byrandom] ~ std_normal();
  lambda_trend ~ normal(5, 30);
  for (j in 1:n_lv) {
    LV[1, j] ~ normal(trend_mus[ytimes_trend[1, j]], sigma[j]);
    for (i in 2:n) {
      LV[i, j] ~ normal(trend_mus[ytimes_trend[i, j]]
                        + LV[i - 1, j]
                        - trend_mus[ytimes_trend[i - 1, j]],
                        sigma[j]);
    }
  }
  // priors for precision parameters
  phi ~ gamma(0.01, 0.01);
  {
    // likelihood functions
    vector[n_nonmissing] flat_trends;
    vector[n_nonmissing] flat_phis;
    flat_trends = (to_vector(trend))[obs_ind];
    flat_phis = rep_each(phi, n)[obs_ind];
    flat_ys ~ beta(inv_logit(append_col(flat_xs, flat_trends) * append_row(b, 1.0)) .* flat_phis,
                   (1 - inv_logit(append_col(flat_xs, flat_trends) * append_row(b, 1.0))) .* flat_phis);
  }
}
generated quantities {
  vector[total_obs] eta;
  matrix[n, n_series] phi_vec;
  matrix[n, n_series] mus;
  vector[n_sp] rho;
  vector[n_sp_trend] rho_trend;
  vector[n_lv] penalty;
  array[n, n_series] real ypred;
  rho = log(lambda);
  rho_trend = log(lambda_trend);
  penalty = 1.0 / (sigma .* sigma);
  matrix[n_series, n_lv] lv_coefs = Z;
  // posterior predictions
  eta = X * b;
  for (s in 1:n_series) {
    phi_vec[1:n, s] = rep_vector(phi[s], n);
  }
  for (s in 1:n_series) {
    mus[1:n, s] = eta[ytimes[1:n, s]] + trend[1:n, s];
    ypred[1:n, s] = beta_rng(inv_logit(mus[1:n, s]) .* phi_vec[1:n, s],
                             (1 - inv_logit(mus[1:n, s])) .* phi_vec[1:n, s]);
  }
}
[ FAIL 0 | WARN 0 | SKIP 12 | PASS 272 ]

══ Skipped tests (12) ══════════════════════════════════════════════════════════
• On CRAN (12): 'test-binomial.R:4:1', 'test-example_processing.R:4:1',
  'test-families.R:21:3', 'test-monotonic.R:4:1', 'test-mvgam-methods.R:41:3',
  'test-mvgam-methods.R:89:3', 'test-mvgam-methods.R:178:3',
  'test-mvgam-methods.R:285:3', 'test-mvgam.R:102:3', 'test-mvgam.R:533:3',
  'test-mvgam_priors.R:4:1', 'test-nmixture.R:4:1'

[ FAIL 0 | WARN 0 | SKIP 12 | PASS 272 ]
> 
> proc.time()
   user  system elapsed 
1499.39    4.71 1504.93 