R Under development (unstable) (2024-08-21 r87038 ucrt) -- "Unsuffered Consequences"
Copyright (C) 2024 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> library(testthat)
> Sys.setenv('OMP_THREAD_LIMIT' = 2)
> library(rlibkriging)

Attaching package: 'rlibkriging'

The following objects are masked from 'package:base':

    load, save

> 
> ## library(rlibkriging, lib.loc = "bindings/R/Rlibs")
> ## library(testthat)
> 
> ## Changes by Yves: removed package-qualified references such as
> ## 'rlibkriging::simulate', because 'simulate' is not exported as such
> ## from rlibkriging.
> 
> f <- function(x) {
+     1 - 1 / 2 * (sin(12 * x) / (1 + x) + 2 * cos(7 * x) * x^5 + 0.7)
+ }
> ## f <- function(X) apply(X, 1, function(x) prod(sin((x - .5)^2)))
> n <- 5
> set.seed(123)
> X <- cbind(runif(n))
> y <- f(X)
> d <- ncol(X)
> 
> ## kriging model 1: "gauss" covariance structure, constant trend,
> ## no nugget effect, fitted by leave-one-out
> km1 <- DiceKriging::km(design = X, response = y, covtype = "gauss",
+                        formula = ~1, estim.method = "LOO",
+                        parinit = c(.15), control = list(trace = FALSE))
> ## library(rlibkriging)
> KM1 <- rlibkriging::KM(design = X, response = y, covtype = "gauss",
+                        formula = ~1, estim.method = "LOO",
+                        parinit = c(.15))
> 
> test_that("m1.leaveOneOutFun == KM1.leaveOneOutFun",
+           expect_true(DiceKriging::leaveOneOutFun(km1@covariance@range.val, km1) ==
+                       DiceKriging::leaveOneOutFun(km1@covariance@range.val, KM1)))
Test passed 🥳
> 
> test_that("m1.argmax(loo) == KM1.argmax(loo)",
+           expect_equal(km1@covariance@range.val,
+                        KM1@covariance@range.val,
+                        tol = 0.001))
Test passed 🥳
> 
> plot(Vectorize(function(.t) DiceKriging::leaveOneOutFun(param = as.numeric(.t), model = km1)))
> abline(v = km1@covariance@range.val)
> plot(Vectorize(function(.t) rlibkriging::leaveOneOutFun(KM1@Kriging, as.numeric(.t))),
+      add = TRUE, col = 'red')
> abline(v = KM1@covariance@range.val, col = 'red')
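For reference, the LOO criterion compared above never requires n refits: for simple kriging with known (zero) mean, the leave-one-out residuals have the closed form e_i = (K^-1 y)_i / (K^-1)_ii (Dubrule's formula). The base-R sketch below checks that identity on synthetic data; the squared-exponential helper and the zero-mean assumption are illustrative only, not the packages' internal code.

## Closed-form leave-one-out residuals (simple kriging, zero mean) versus
## brute-force removal of each point. Illustrative helper, not package code.
sq_exp <- function(x1, x2, theta) exp(-0.5 * (outer(x1, x2, "-") / theta)^2)
set.seed(1)
xs <- runif(8)
ys <- sin(3 * xs)
K <- sq_exp(xs, xs, theta = 0.3) + 1e-8 * diag(8)  # jitter for conditioning
Kinv <- solve(K)
e_loo <- (Kinv %*% ys) / diag(Kinv)                # Dubrule's identity
e_brute <- sapply(seq_along(xs), function(i)       # predict point i from the others
    ys[i] - K[i, -i] %*% solve(K[-i, -i], ys[-i]))
max(abs(e_loo - e_brute))                          # ~1e-12: same residuals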
> 
> 
> ##########################################################################
> 
> context("# A 2D example - Branin-Hoo function")
> 
> branin <- function(x) {
+     x1 <- x[1] * 15 - 5
+     x2 <- x[2] * 15
+     (x2 - 5/(4 * pi^2) * (x1^2) + 5/pi * x1 - 6)^2 +
+         10 * (1 - 1/(8 * pi)) * cos(x1) + 10
+ }
> 
> ## a 16-point factorial design and the corresponding response
> d <- 2; n <- 16
> design.fact <- expand.grid(x1 = seq(0, 1, length.out = 4),
+                            x2 = seq(0, 1, length.out = 4))
> y <- apply(design.fact, 1, DiceKriging::branin)
> 
> library(DiceKriging)

Attaching package: 'DiceKriging'

The following object is masked _by_ '.GlobalEnv':

    branin

The following object is masked from 'package:rlibkriging':

    leaveOneOutFun

> ## kriging model 1: "gauss" covariance structure, constant trend, no nugget effect
> km1 <- DiceKriging::km(design = design.fact, response = y, covtype = "gauss",
+                        parinit = c(.5, 1), control = list(trace = FALSE))
> rlibkriging:::optim_log(3)
> KM1 <- rlibkriging::KM(design = design.fact, response = y, covtype = "gauss",
+                        parinit = c(.5, 1))
BFGS:
  max iterations: 20
  null gradient tolerance: 0.001
  constant objective tolerance: 0.001
  reparametrize: 1
  normalize: 0
  lower_bounds: 1.0000e-10 1.0000e-10
  upper_bounds: 2.0000 2.0000
  start_point: 0.5000 1.0000
  iterations: 5
  status: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL
  start_point: 0.5000 1.0000
  solution: 0.7079 2.0000
  f_opt: 76.2701
  task: CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL
  warn_flag 0
  num_fun_calls 8
  num_iters 5
  total time spent 0.0003237 sec
  time spent on searching for Cauchy points 0
  time spent on subspace minimization 0
  time spent on line search 0
  f(x) in the previous iteration 76.2703
  factr * epsilon 2.22045e-06
best objective: 76.2701
best solution: 0.7079 2.0000
> rlibkriging:::optim_log(0)
> 
> test_that("m1.logLikFun == as_m1.logLikFun",
+           expect_true(DiceKriging::logLikFun(km1@covariance@range.val, km1) ==
+                       DiceKriging::logLikFun(km1@covariance@range.val, KM1)))
Test passed 🎉
> 
> test_that("m1.argmax(logLik) == as_m1.argmax(logLik)",
+           expect_equal(km1@covariance@range.val,
+                        KM1@covariance@range.val,
+                        tol = 0.01))
Test passed 😸
> 
> ll <- function(Theta) {
+     apply(Theta, 1,
+           function(theta) DiceKriging::logLikFun(theta, km1))
+ }
> as_ll <- function(Theta) {
+     apply(Theta, 1,
+           function(theta) rlibkriging::logLikelihoodFun(KM1@Kriging, theta)$logLikelihood[1])
+ }
> t <- seq(from = 0.01, to = 2, length.out = 51)
> ttg <- expand.grid(t, t)
> contour(t, t,
+         matrix(ll(as.matrix(ttg)), nrow = length(t)), nlevels = 30)
> contour(t, t,
+         matrix(as_ll(as.matrix(ttg)), nrow = length(t)), nlevels = 30,
+         add = TRUE, col = 'red')
> points(km1@covariance@range.val[1],
+        km1@covariance@range.val[2])
> points(KM1@covariance@range.val[1],
+        KM1@covariance@range.val[2],
+        col = 'red')
> 
> pred <- DiceKriging::predict(km1,
+                              newdata = matrix(.5, ncol = 2), type = "UK",
+                              checkNames = FALSE, light.return = TRUE)
> Pred <- DiceKriging::predict(KM1,
+                              newdata = matrix(.5, ncol = 2), type = "UK",
+                              checkNames = FALSE, light.return = TRUE)
> 
> test_that("pred$mean, Pred$mean",
+           expect_equal(pred$mean[1], Pred$mean[1], tol = 0.1))
Test passed 🥇
> test_that("pred$sd, Pred$sd",
+           expect_equal(pred$sd[1], Pred$sd[1], tol = 0.1))
Test passed 🌈
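The BFGS trace above consumes the gradient of the profiled log-likelihood. A quick independent check of such a gradient is a central finite difference; the sketch below assumes that rlibkriging::logLikelihoodFun accepts a grad = TRUE argument returning a $logLikelihoodGrad component (as in recent libKriging bindings) and reuses the KM1 model fitted above.

## Central finite-difference check of the log-likelihood gradient at theta0.
## Assumes logLikelihoodFun(..., grad = TRUE) returns $logLikelihoodGrad.
theta0 <- c(0.7, 1.5)
eps <- 1e-6
ll_at <- function(theta) rlibkriging::logLikelihoodFun(KM1@Kriging, theta)$logLikelihood[1]
grad_fd <- sapply(seq_along(theta0), function(j) {
    h <- replace(numeric(length(theta0)), j, eps)
    (ll_at(theta0 + h) - ll_at(theta0 - h)) / (2 * eps)
})
grad_an <- rlibkriging::logLikelihoodFun(KM1@Kriging, theta0, grad = TRUE)$logLikelihoodGrad
max(abs(grad_fd - as.numeric(grad_an)))  # small if the analytic gradient is right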
> 
> ################################################################################
> 
> f <- function(x) {
+     1 - 1 / 2 * (sin(12 * x) / (1 + x) + 2 * cos(7 * x) * x^5 + 0.7)
+ }
> plot(f)
> set.seed(123)
> X <- as.matrix(runif(5))
> y <- f(X)
> points(X, y, col = 'blue')
> 
> ## rlibkriging:::optim_log(2)
> ## rlibkriging:::optim_use_variogram_bounds_heuristic(TRUE)
> 
> r <- Kriging(y, X, kernel = "gauss")
> x <- seq(from = 0, to = 1, length.out = 101)
> s_x <- simulate(r, nsim = 3, x = x)
> lines(x, s_x[, 1], col = 'blue')
> lines(x, s_x[, 2], col = 'blue')
> lines(x, s_x[, 3], col = 'blue')
> 
> ## sk_x <- simulate(as.km(r), nsim = 3, newdata = x)
> ## lines(x, sk_x[, 1], col = 'red')
> ## lines(x, sk_x[, 2], col = 'red')
> ## lines(x, sk_x[, 3], col = 'red')
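Because the paths drawn above are conditional simulations, each one should interpolate the design points up to the simulation nugget. A quick check with the r, X and y objects from this block (illustrative; not part of the original test file):

## Every conditional sample path must pass through the observations.
s_X <- simulate(r, nsim = 100, x = X)                  # simulate at the design points
max(abs(s_X - matrix(y, nrow = nrow(X), ncol = 100)))  # ~0 up to the nugget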
> 
> ################################################################################
> 
> f <- function(x) 1 - 1 / 2 * (sin(12 * x) / (1 + x) + 2 * cos(7 * x) * x^5 + 0.7)
> ## f <- function(X) apply(X, 1, function(x) prod(sin((x - .5)^2)))
> n <- 5
> set.seed(123)
> X <- cbind(runif(n))
> y <- f(X)
> d <- ncol(X)
> plot(X, y)
> 
> formula <- ~1
> design <- X
> response <- y
> covtype <- "gauss"
> 
> ## k <<- DiceKriging::km(formula = formula, design = design,
> ##                       response = response, covtype = covtype,
> ##                       coef.cov = 0.5, coef.var = 0.5, coef.trend = 0.5,
> ##                       control = list(trace = FALSE))
> ## NOT working for logLikFun, because @method is not available (bug in
> ## DiceKriging?)
> ## as_k <<- rlibkriging::KM(formula = formula, design = design,
> ##                          response = response, covtype = covtype,
> ##                          coef.cov = 0.5, coef.var = 0.5, coef.trend = 0.5)
> 
> km2 <<- DiceKriging::km(formula = formula, design = design,
+                         response = response, covtype = covtype,
+                         coef.cov = 0.5, coef.var = 0.5, coef.trend = 0.5,
+                         control = list(trace = FALSE))
> km2@method <- "LL"
> km2@case <- "LLconcentration_beta"
> 
> ## XXXY Here a warning is thrown
> suppressWarnings(KM2 <<- rlibkriging::KM(formula = formula, design = design,
+                                          response = response, covtype = covtype,
+                                          coef.cov = km2@covariance@range.val,
+                                          coef.var = km2@covariance@sd2,
+                                          coef.trend = km2@trend.coef))
> 
> test_that("DiceKriging::T == rlibkriging::T", expect_equal(km2@T, KM2@T))
Test passed 🥳
> test_that("DiceKriging::M == rlibkriging::M", expect_equal(km2@M, KM2@M))
Test passed 🥳
> test_that("DiceKriging::z == rlibkriging::z", expect_equal(km2@z, KM2@z))
Test passed 🎉
> 
> ## plot(Vectorize(function(.t) DiceKriging::logLikFun(c(.t, 0.5), km2)[1]),
> ##      xlim = c(0.000001, 1), ylim = c(-5000, 0))
> ## plot(Vectorize(function(.t)
> ##     rlibkriging::logLikelihoodFun(KM2@Kriging, .t)$logLikelihood[1]),
> ##      xlim = c(0.000001, 1), add = TRUE, col = 'red')
> ## abline(v = km2@covariance@range.val, col = 'blue')
> 
> x <- km2@covariance@range.val
> test_that("DiceKriging::logLik == rlibkriging::logLikelihood",
+           expect_equal(DiceKriging::logLikFun(x, km2)[1],
+                        rlibkriging::logLikelihoodFun(KM2@Kriging, x)$logLikelihood[1]))
Test passed 😸
> 
> x <- runif(ncol(X))
> test_that("DiceKriging::logLik == rlibkriging::logLikelihood",
+           expect_equal(DiceKriging::logLikFun(c(x, 0.5), km2)[1],  # logLikFun arg is c(theta, sigma2)
+                        rlibkriging::logLikelihoodFun(KM2@Kriging, x)$logLikelihood[1]))
Test passed 🌈
> ## not implemented in DiceKriging: LLconcentration_beta for LOO
> ## test_that("DiceKriging::leaveOneOut == rlibkriging::leaveOneOut",
> ##           expect_equal(DiceKriging::leaveOneOutFun(x, km2)[1],
> ##                        rlibkriging::leaveOneOutFun(KM2@Kriging, x)$leaveOneOut[1]))
> 
> .x <- seq(from = 0, to = 1, length.out = 11)
> plot(f)
> points(X, y)
> lines(.x, DiceKriging::predict(km2, newdata = .x, type = "UK", checkNames = FALSE)$mean, xlim = c(0, 1))
> lines(.x, DiceKriging::predict(KM2, newdata = .x, type = "UK", checkNames = FALSE)$mean, col = 'red')
> lines(.x, KM2@Kriging$predict(.x)$mean, col = 'red', lty = 2)
> 
> x <- 0.5
> test_that("Consistency of 'DiceKriging' and 'rlibkriging' 'predict' methods",
+           expect_equal(DiceKriging::predict(km2, newdata = x, type = "UK",
+                                             checkNames = FALSE)$mean[1],
+                        DiceKriging::predict(KM2, newdata = x, type = "UK")$mean[1],
+                        tol = 0.01))
Test passed 🎉
> 
> x <- matrix(X[2, ], ncol = d) + 0.001
> n <- 1000
> set.seed(123)
> sims_km2 <- DiceKriging::simulate(km2, nsim = n, newdata = x,
+                                   checkNames = FALSE, cond = TRUE,
+                                   nugget.sim = 1e-10)
> sims_KM2 <- DiceKriging::simulate(KM2, nsim = n, newdata = x,
+                                   checkNames = FALSE, cond = TRUE)
> t <- t.test(sims_km2, sims_KM2, var.equal = FALSE)
> 
> if (t$p.value < 0.05) {
+     plot(f)
+     points(X, y)
+     xx <- seq(from = 0, to = 1, length.out = 101)
+     for (i in 1:100) {
+         lines(xx, DiceKriging::simulate(km2, nsim = 1, newdata = xx,
+                                         checkNames = FALSE, cond = TRUE,
+                                         nugget.sim = 1e-10),
+               col = rgb(0, 0, 1, 0.02))
+         lines(xx, DiceKriging::simulate(KM2, nsim = 1, newdata = xx,
+                                         checkNames = FALSE, cond = TRUE,
+                                         nugget.sim = 0),
+               col = rgb(1, 0, 0, 0.02))
+     }
+ }
> print(t)

        Welch Two Sample t-test

data:  sims_km2 and sims_KM2
t = 0.16304, df = 1452.2, p-value = 0.8705
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -7.139511e-07  8.433887e-07
sample estimates:
mean of x mean of y 
0.4399124 0.4399124 

> ## issue #100
> ## test_that("DiceKriging::simulate ~= rlibkriging::simulate",
> ##           expect_true(t$p.value > 0.05))
> ################################################################################
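Setting km2@case <- "LLconcentration_beta" above selects the log-likelihood in which the trend coefficients are concentrated out: for a trend matrix F and correlation matrix R, the profiled coefficients are the GLS estimate beta_hat = (F' R^-1 F)^-1 F' R^-1 y. A standalone base-R sketch of that one step, on synthetic F and R (not the packages' internals):

## GLS trend coefficients, as used when beta is concentrated out of the
## log-likelihood. Synthetic inputs; illustrative only.
set.seed(42)
m <- 6
Fmat <- cbind(1, runif(m))                               # trend matrix, e.g. ~1 + x
R <- exp(-as.matrix(dist(runif(m)))^2) + 1e-8 * diag(m)  # a valid correlation matrix
ys <- rnorm(m)
Ri_F <- solve(R, Fmat)                                   # R^-1 F
beta_hat <- solve(t(Fmat) %*% Ri_F, t(Ri_F) %*% ys)      # (F'R^-1F)^-1 F'R^-1 y
beta_hat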
> 
> 
> f <- function(X) apply(X, 1, function(x) prod(sin((x * pi - .5)^2)))
> n <- 5  # 100
> set.seed(123)
> X <- cbind(runif(n))  # ,runif(n),runif(n)
> y <- f(X)
> d <- ncol(X)
> ## plot(function(x) f(as.matrix(x)))
> ## points(X, y)
> 
> test_args <- function(formula, design, response, covtype, estim.method) {
+     context(paste0("asDiceKriging: ",
+                    paste0(sep = ", ",
+                           formula,
+                           paste0("design ", nrow(design), "x", ncol(design)),
+                           paste0("response ", nrow(response), "x", ncol(response)),
+                           covtype)))
+ 
+     set.seed(123)
+ 
+     parinit <- runif(ncol(design))
+     k <<- DiceKriging::km(formula = formula, design = design,
+                           response = response, covtype = covtype,
+                           estim.method = estim.method,
+                           parinit = parinit, control = list(trace = FALSE))
+     as_k <<- rlibkriging::KM(formula = formula, design = design,
+                              response = response, covtype = covtype,
+                              estim.method = estim.method,
+                              parinit = parinit)
+ 
+     ## print(k)
+     ## print(as_k)
+     ## if (e == "MLE") {
+     ##     plot(Vectorize(function(t) DiceKriging::logLikFun(t, k)[1]), xlim = c(0.0001, 2))
+     ## } else {
+     ##     plot(Vectorize(function(t) DiceKriging::leaveOneOutFun(t, k)[1]), xlim = c(0.0001, 2))
+     ## }
+     ## abline(v = k@covariance@range.val)
+     ## if (e == "MLE") {
+     ##     plot(Vectorize(function(t) rlibkriging::logLikelihoodFun(as_k@Kriging, t)$logLikelihood[1]),
+     ##          xlim = c(0.0001, 2), add = TRUE, col = 'red')
+     ## } else {
+     ##     plot(Vectorize(function(t) rlibkriging::leaveOneOutFun(as_k@Kriging, t)$leaveOneOut[1]),
+     ##          xlim = c(0.0001, 2), add = TRUE, col = 'red')
+     ## }
+     ## abline(v = as_k@covariance@range.val, col = 'red')
+ 
+     t <- runif(ncol(X))
+     test_that("DiceKriging::logLikFun == rlibkriging::logLikelihood",
+               expect_equal(DiceKriging::logLikFun(t, k)[1],
+                            rlibkriging::logLikelihoodFun(as_k@Kriging, t)$logLikelihood[1]))
+     test_that("DiceKriging::leaveOneOutFun == rlibkriging::leaveOneOut",
+               expect_equal(DiceKriging::leaveOneOutFun(t, k)[1],
+                            rlibkriging::leaveOneOutFun(as_k@Kriging, t)$leaveOneOut[1]))
+ 
+     x <- matrix(runif(d), ncol = d)
+     test_that("DiceKriging::predict == rlibkriging::predict",
+               expect_equal(DiceKriging::predict(k, newdata = x, type = "UK",
+                                                 checkNames = FALSE)$mean[1],
+                            DiceKriging::predict(as_k, newdata = x, type = "UK")$mean[1],
+                            tol = 0.01))
+ 
+     n <- 1000
+     set.seed(123)
+     sims_km2 <<- DiceKriging::simulate(k, nsim = n, newdata = x,
+                                        checkNames = FALSE, cond = TRUE,
+                                        nugget.sim = 1e-10)
+     sims_KM2 <<- DiceKriging::simulate(as_k, nsim = n, newdata = x,
+                                        checkNames = FALSE, cond = TRUE)
+     t <- t.test(t(sims_km2), sims_KM2, var.equal = FALSE, paired = FALSE)
+     print(t)
+     ## issue #100
+     test_that("DiceKriging::simulate ~= rlibkriging::simulate",
+               expect_true(t$p.value > 0.05))
+ }
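The nested loops below sweep the full kernel x objective x trend grid. The same 24 cases can be enumerated flat with expand.grid, ordered to match the loop output (objective varies fastest); a sketch, with the trend column kept as a list because formulas are not atomic:

## Flat enumeration of the same test matrix (sketch, not executed here).
cases <- expand.grid(objective = c("MLE", "LOO"),
                     kernel = c("gauss", "exp", "matern3_2", "matern5_2"),
                     trend = list(~1, ~., ~.^2),
                     stringsAsFactors = FALSE)
## for (i in seq_len(nrow(cases)))
##     test_args(formula = cases$trend[[i]], design = X, response = y,
##               covtype = cases$kernel[i], estim.method = cases$objective[i])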
> 
> ## Test the whole matrix of km features already available
> for (f in c(~1, ~., ~.^2))
+     for (co in c("gauss", "exp", "matern3_2", "matern5_2"))
+         for (e in c("MLE", "LOO")) {
+             print(paste0("kernel:", co, " objective:", e,
+                          " trend:", paste0(f, collapse = "")))
+             test_args(formula = f, design = X,
+                       response = y, covtype = co, estim.method = e)
+         }
[1] "kernel:gauss objective:MLE trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.11878, df = 1564.2, p-value = 0.9055
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.614359e-07  7.467075e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:gauss objective:LOO trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.083257, df = 1657.3, p-value = 0.9337
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.909163e-07  7.521723e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:exp objective:MLE trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51389, df = 999, p-value = 0.6074
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.542367e-07  7.765503e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
[1] "kernel:exp objective:LOO trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.092299, df = 1633.6, p-value = 0.9265
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.831666e-07  7.506373e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern3_2 objective:MLE trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51428, df = 999, p-value = 0.6072
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.541144e-07  7.766716e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
[1] "kernel:matern3_2 objective:LOO trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51366, df = 999, p-value = 0.6076
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.543100e-07  7.764769e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
[1] "kernel:matern5_2 objective:MLE trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.11878, df = 1564.2, p-value = 0.9055
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.614359e-07  7.467075e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern5_2 objective:LOO trend:~1"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.081543, df = 1661.8, p-value = 0.935
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.924042e-07  7.524740e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:gauss objective:MLE trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:gauss objective:LOO trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:exp objective:MLE trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:exp objective:LOO trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.12389, df = 1551, p-value = 0.9014
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.574026e-07  7.460438e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern3_2 objective:MLE trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611139e-07  7.386815e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern3_2 objective:LOO trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51428, df = 999, p-value = 0.6072
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.541150e-07  7.766727e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
[1] "kernel:matern5_2 objective:MLE trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern5_2 objective:LOO trend:~."
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51259, df = 999.01, p-value = 0.6083
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.546449e-07  7.761453e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
[1] "kernel:gauss objective:MLE trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:gauss objective:LOO trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:exp objective:MLE trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:exp objective:LOO trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.12389, df = 1551, p-value = 0.9014
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -6.574026e-07  7.460438e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern3_2 objective:MLE trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611139e-07  7.386815e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern3_2 objective:LOO trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51428, df = 999, p-value = 0.6072
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.541150e-07  7.766727e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
[1] "kernel:matern5_2 objective:MLE trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.26802, df = 1227.3, p-value = 0.7887
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -5.611121e-07  7.386816e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554328 

Test passed 🎊
[1] "kernel:matern5_2 objective:LOO trend:~.^2"
Test passed 😸
Test passed 🎉
Test passed 🥳

        Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.51259, df = 999.01, p-value = 0.6083
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -4.546449e-07  7.761453e-07
sample estimates:
 mean of x  mean of y 
-0.2554327 -0.2554329 

Test passed 🎊
> 
> proc.time()
   user  system elapsed 
  13.87    1.62   15.51 
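Throughout the matrix above, the two simulators are accepted as consistent whenever a Welch two-sample t-test cannot distinguish the ensemble means. A self-contained illustration of that acceptance criterion on synthetic ensembles (base R only; not part of the test run):

## Welch t-test as a two-ensemble consistency check (illustrative).
set.seed(1)
ens_a <- rnorm(1000, mean = 0.44, sd = 1e-5)   # stand-in for sims_km2
ens_b <- rnorm(1000, mean = 0.44, sd = 1e-5)   # stand-in for sims_KM2
tt <- t.test(ens_a, ens_b, var.equal = FALSE)  # Welch: variances not assumed equal
tt$p.value                                     # large => ensembles indistinguishable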