R Under development (unstable) (2026-01-08 r89292 ucrt) -- "Unsuffered Consequences"
Copyright (C) 2026 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> if (requireNamespace('DiceKriging', quietly = TRUE)) {
+ library(testthat)
+ Sys.setenv('OMP_THREAD_LIMIT' = 2)
+ 
+ library(DiceKriging)
+ library(rlibkriging)
+ 
+ ## library(rlibkriging, lib.loc="bindings/R/Rlibs")
+ ## library(testthat)
+ 
+ ## Changes by Yves: removed the package-qualified calls such as
+ ## 'rlibkriging::simulate', because 'simulate' is not exported as such
+ ## from rlibkriging.
+ 
+ f <- function(x) {
+   1 - 1 / 2 * (sin(12 * x) / (1 + x) + 2 * cos(7 * x) * x^5 + 0.7)
+ }
+ ## f <- function(X) apply(X, 1, function(x) prod(sin((x - .5)^2)))
+ n <- 5
+ set.seed(123)
+ X <- cbind(runif(n))
+ y <- f(X)
+ d <- ncol(X)
+ 
+ ## kriging model 1: "gauss" covariance structure, constant trend, no
+ ## nugget effect, leave-one-out estimation
+ km1 <- DiceKriging::km(design = X, response = y, covtype = "gauss",
+                        formula = ~1, estim.method = "LOO",
+                        parinit = c(.15), control = list(trace = FALSE))
+ ## library(rlibkriging)
+ KM1 <- rlibkriging::KM(design = X, response = y, covtype = "gauss",
+                        formula = ~1, estim.method = "LOO",
+                        parinit = c(.15))
+ 
+ test_that("km1.leaveOneOutFun == KM1.leaveOneOutFun",
+           expect_true(DiceKriging::leaveOneOutFun(km1@covariance@range.val, km1) ==
+                       DiceKriging::leaveOneOutFun(km1@covariance@range.val, KM1)))
+ 
+ test_that("km1.argmax(loo) == KM1.argmax(loo)",
+           expect_equal(km1@covariance@range.val,
+                        KM1@covariance@range.val,
+                        tol = 0.001))
+ 
+ plot(Vectorize(function(.t) DiceKriging::leaveOneOutFun(param = as.numeric(.t), model = km1)))
+ abline(v = km1@covariance@range.val)
+ plot(Vectorize(function(.t) rlibkriging::leaveOneOutFun(KM1@Kriging, as.numeric(.t))),
+      add = TRUE, col = 'red')
+ abline(v = KM1@covariance@range.val, col = 'red')
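+ 
+ ## Illustrative sketch (not run), assuming the same return conventions
+ ## used above: the agreement of the two LOO criteria can also be checked
+ ## numerically on a grid of range values instead of visually.
+ ## theta_grid <- seq(0.01, 1, length.out = 50)
+ ## loo_dk <- sapply(theta_grid, function(t) DiceKriging::leaveOneOutFun(t, km1))
+ ## loo_lk <- sapply(theta_grid, function(t)
+ ##   rlibkriging::leaveOneOutFun(KM1@Kriging, t)$leaveOneOut[1])
+ ## max(abs(loo_dk - loo_lk))  # near 0 if the two criteria match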
+ 
+ ##########################################################################
+ 
+ context("# A 2D example - Branin-Hoo function")
+ 
+ branin <- function(x) {
+   x1 <- x[1] * 15 - 5
+   x2 <- x[2] * 15
+   (x2 - 5 / (4 * pi^2) * (x1^2) + 5 / pi * x1 - 6)^2 +
+     10 * (1 - 1 / (8 * pi)) * cos(x1) + 10
+ }
+ 
+ ## a 16-point factorial design and the corresponding response
+ d <- 2; n <- 16
+ design.fact <- expand.grid(x1 = seq(0, 1, length.out = 4),
+                            x2 = seq(0, 1, length.out = 4))
+ y <- apply(design.fact, 1, DiceKriging::branin)
+ 
+ library(DiceKriging)
+ ## kriging model 1: "gauss" covariance structure, constant trend, no
+ ## nugget effect, maximum-likelihood estimation
+ km1 <- DiceKriging::km(design = design.fact, response = y, covtype = "gauss",
+                        parinit = c(.5, 1), control = list(trace = FALSE))
+ rlibkriging:::optim_set_log_level(3)
+ KM1 <- rlibkriging::KM(design = design.fact, response = y, covtype = "gauss",
+                        parinit = c(.5, 1))
+ rlibkriging:::optim_set_log_level(0)
+ 
+ test_that("km1.logLikFun == KM1.logLikFun",
+           expect_true(DiceKriging::logLikFun(km1@covariance@range.val, km1) ==
+                       DiceKriging::logLikFun(km1@covariance@range.val, KM1)))
+ 
+ test_that("km1.argmax(logLik) == KM1.argmax(logLik)",
+           expect_equal(km1@covariance@range.val,
+                        KM1@covariance@range.val,
+                        tol = 0.01))
+ 
+ ll <- function(Theta) {
+   apply(Theta, 1,
+         function(theta) DiceKriging::logLikFun(theta, km1))
+ }
+ as_ll <- function(Theta) {
+   apply(Theta, 1,
+         function(theta) rlibkriging::logLikelihoodFun(KM1@Kriging, theta)$logLikelihood[1])
+ }
+ t <- seq(from = 0.01, to = 2, length.out = 5)
+ ttg <- expand.grid(t, t)
+ contour(t, t,
+         matrix(ll(as.matrix(ttg)), nrow = length(t)), nlevels = 30)
+ contour(t, t,
+         matrix(as_ll(as.matrix(ttg)), nrow = length(t)), nlevels = 30,
+         add = TRUE, col = 'red')
+ points(km1@covariance@range.val[1],
+        km1@covariance@range.val[2])
+ points(KM1@covariance@range.val[1],
+        KM1@covariance@range.val[2],
+        col = 'red')
+ 
+ pred <- DiceKriging::predict(km1,
+                              newdata = matrix(.5, ncol = 2), type = "UK",
+                              checkNames = FALSE, light.return = TRUE)
+ Pred <- DiceKriging::predict(KM1,
+                              newdata = matrix(.5, ncol = 2), type = "UK",
+                              checkNames = FALSE, light.return = TRUE)
+ 
+ test_that("pred$mean == Pred$mean",
+           expect_equal(pred$mean[1], Pred$mean[1], tol = 0.1))
+ test_that("pred$sd == Pred$sd",
+           expect_equal(pred$sd[1], Pred$sd[1], tol = 0.1))
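+ 
+ ## Illustrative sketch (not run): the contour overlay above is a visual
+ ## check; with the helpers just defined, the two likelihood surfaces can
+ ## also be compared pointwise on the same grid.
+ ## max(abs(ll(as.matrix(ttg)) - as_ll(as.matrix(ttg))))  # small if they agree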
+ 
+ ################################################################################
+ 
+ f <- function(x) {
+   1 - 1 / 2 * (sin(12 * x) / (1 + x) + 2 * cos(7 * x) * x^5 + 0.7)
+ }
+ plot(f)
+ set.seed(123)
+ X <- as.matrix(runif(5))
+ y <- f(X)
+ points(X, y, col = 'blue')
+ 
+ ## rlibkriging:::optim_log(2)
+ ## rlibkriging:::optim_use_variogram_bounds_heuristic(TRUE)
+ 
+ r <- Kriging(y, X, kernel = "gauss")
+ x <- seq(0, 1, length.out = 5)
+ s_x <- simulate(r, nsim = 3, x = x)
+ lines(x, s_x[, 1], col = 'blue')
+ lines(x, s_x[, 2], col = 'blue')
+ lines(x, s_x[, 3], col = 'blue')
+ 
+ ## sk_x <- simulate(as.km(r), nsim = 3, newdata = x)
+ ## lines(x, sk_x[, 1], col = 'red')
+ ## lines(x, sk_x[, 2], col = 'red')
+ ## lines(x, sk_x[, 3], col = 'red')
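+ 
+ ## Illustrative sketch (not run), assuming simulate() draws conditional
+ ## paths for a Kriging object, as the plot above suggests: conditional
+ ## simulations should interpolate the design points, so evaluating them
+ ## at X should approximately reproduce y.
+ ## s_X <- simulate(r, nsim = 3, x = X)
+ ## max(abs(s_X - matrix(y, nrow(X), 3)))  # expected to be near 0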
+ 
+ ################################################################################
+ 
+ f <- function(x) 1 - 1 / 2 * (sin(12 * x) / (1 + x) + 2 * cos(7 * x) * x^5 + 0.7)
+ ## f <- function(X) apply(X, 1, function(x) prod(sin((x - .5)^2)))
+ n <- 5
+ set.seed(123)
+ X <- cbind(runif(n))
+ y <- f(X)
+ d <- ncol(X)
+ plot(X, y)
+ 
+ formula <- ~1
+ design <- X
+ response <- y
+ covtype <- "gauss"
+ 
+ ## k <<- DiceKriging::km(formula = formula, design = design,
+ ##                       response = response, covtype = covtype,
+ ##                       coef.cov = 0.5, coef.var = 0.5, coef.trend = 0.5,
+ ##                       control = list(trace = FALSE))
+ ## NOT working for logLikFun, because @method is not available (a bug in
+ ## DiceKriging?)
+ ## as_k <<- rlibkriging::KM(formula = formula, design = design,
+ ##                          response = response, covtype = covtype,
+ ##                          coef.cov = 0.5, coef.var = 0.5, coef.trend = 0.5)
+ 
+ km2 <<- DiceKriging::km(formula = formula, design = design,
+                         response = response, covtype = covtype,
+                         coef.cov = 0.5, coef.var = 0.5, coef.trend = 0.5,
+                         control = list(trace = FALSE))
+ km2@method <- "LL"
+ km2@case <- "LLconcentration_beta"
+ 
+ ## XXXY Here a warning is thrown
+ suppressWarnings(KM2 <<- rlibkriging::KM(formula = formula, design = design,
+                                          response = response, covtype = covtype,
+                                          coef.cov = km2@covariance@range.val,
+                                          coef.var = km2@covariance@sd2,
+                                          coef.trend = km2@trend.coef))
+ 
+ test_that("DiceKriging::T == rlibkriging::T", expect_equal(km2@T, KM2@T))
+ test_that("DiceKriging::M == rlibkriging::M", expect_equal(km2@M, KM2@M))
+ test_that("DiceKriging::z == rlibkriging::z", expect_equal(km2@z, KM2@z))
+ 
+ ## plot(Vectorize(function(.t) DiceKriging::logLikFun(c(.t, 0.5), km2)[1]),
+ ##      xlim = c(0.000001, 1), ylim = c(-5000, 0))
+ ## plot(Vectorize(function(.t)
+ ##        rlibkriging::logLikelihoodFun(KM2@Kriging, .t)$logLikelihood[1]),
+ ##      xlim = c(0.000001, 1), add = TRUE, col = 'red')
+ ## abline(v = km2@covariance@range.val, col = 'blue')
+ 
+ x <- km2@covariance@range.val
+ test_that("DiceKriging::logLik == rlibkriging::logLikelihood",
+           expect_equal(DiceKriging::logLikFun(x, km2)[1],
+                        rlibkriging::logLikelihoodFun(KM2@Kriging, x)$logLikelihood[1]))
+ 
+ x <- runif(ncol(X))
+ test_that("DiceKriging::logLik == rlibkriging::logLikelihood",
+           expect_equal(DiceKriging::logLikFun(c(x, 0.5), km2)[1],  # logLikFun arg is c(theta, sigma2)
+                        rlibkriging::logLikelihoodFun(KM2@Kriging, x)$logLikelihood[1]))
+ ## not implemented in DiceKriging: LLconcentration_beta for LOO
+ ## test_that("DiceKriging::leaveOneOut == rlibkriging::leaveOneOut",
+ ##           expect_equal(DiceKriging::leaveOneOutFun(x, km2)[1],
+ ##                        rlibkriging::leaveOneOutFun(KM2@Kriging, x)$leaveOneOut[1]))
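+ 
+ ## Note (illustrative, not run): with @case == "LLconcentration_beta" the
+ ## trend coefficients are concentrated out of the likelihood, which is why
+ ## DiceKriging::logLikFun above takes c(theta, sigma2) while
+ ## rlibkriging::logLikelihoodFun takes theta alone (sigma2 = 0.5 here,
+ ## i.e. the coef.var used to build km2). For a single point:
+ ## theta0 <- 0.3
+ ## DiceKriging::logLikFun(c(theta0, 0.5), km2)[1]
+ ## rlibkriging::logLikelihoodFun(KM2@Kriging, theta0)$logLikelihood[1]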
+ 
+ .x <- seq(from = 0, to = 1, length.out = 11)
+ plot(f)
+ points(X, y)
+ lines(.x, DiceKriging::predict(km2, newdata = .x, type = "UK",
+                                checkNames = FALSE)$mean, xlim = c(0, 1))
+ lines(.x, DiceKriging::predict(KM2, newdata = .x, type = "UK",
+                                checkNames = FALSE)$mean, col = 'red')
+ lines(.x, KM2@Kriging$predict(.x)$mean, col = 'red', lty = 2)
+ 
+ x <- 0.5
+ test_that("Consistency of 'DiceKriging' and 'rlibkriging' 'predict' methods",
+           expect_equal(DiceKriging::predict(km2, newdata = x, type = "UK",
+                                             checkNames = FALSE)$mean[1],
+                        DiceKriging::predict(KM2, newdata = x, type = "UK")$mean[1],
+                        tol = 0.01))
+ 
+ x <- matrix(X[2, ], ncol = d) + 0.001
+ n <- 1000
+ set.seed(123)
+ sims_km2 <- DiceKriging::simulate(km2, nsim = n, newdata = x,
+                                   checkNames = FALSE, cond = TRUE,
+                                   nugget.sim = 1e-10)
+ sims_KM2 <- DiceKriging::simulate(KM2, nsim = n, newdata = x,
+                                   checkNames = FALSE, cond = TRUE)
+ t <- t.test(sims_km2, sims_KM2, var.equal = FALSE)
+ 
+ if (t$p.value < 0.05) {
+   plot(f)
+   points(X, y)
+   xx <- seq(0, 1, length.out = 5)
+   for (i in 1:100) {
+     lines(xx, DiceKriging::simulate(km2, nsim = 1, newdata = xx,
+                                     checkNames = FALSE, cond = TRUE,
+                                     nugget.sim = 1e-10),
+           col = rgb(0, 0, 1, 0.02))
+     lines(xx, DiceKriging::simulate(KM2, nsim = 1, newdata = xx,
+                                     checkNames = FALSE, cond = TRUE,
+                                     nugget.sim = 0),
+           col = rgb(1, 0, 0, 0.02))
+   }
+ }
+ print(t)
+ ## issue #100
+ ## test_that("DiceKriging::simulate ~= rlibkriging::simulate",
+ ##           expect_true(t$p.value > 0.05))
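+ 
+ ## Illustrative sketch (not run): the Welch t-test above only compares the
+ ## means of the two sets of conditional simulations; their spread can be
+ ## compared as well, e.g. through a simple variance ratio.
+ ## var(as.numeric(sims_km2)) / var(as.numeric(sims_KM2))  # expected close to 1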
+ 
+ ################################################################################
+ 
+ f <- function(X) apply(X, 1, function(x) prod(sin((x * pi - .5)^2)))
+ n <- 5 # 100
+ set.seed(123)
+ X <- cbind(runif(n)) # , runif(n), runif(n))
+ y <- f(X)
+ d <- ncol(X)
+ ## plot(function(x) f(as.matrix(x)))
+ ## points(X, y)
+ 
+ test_args <- function(formula, design, response, covtype, estim.method) {
+   context(paste0("asDiceKriging: ",
+                  paste0(sep = ", ",
+                         formula,
+                         paste0("design ", nrow(design), "x", ncol(design)),
+                         paste0("response ", nrow(response), "x", ncol(response)),
+                         covtype)))
+ 
+   set.seed(123)
+ 
+   parinit <- runif(ncol(design))
+   k <<- DiceKriging::km(formula = formula, design = design,
+                         response = response, covtype = covtype,
+                         estim.method = estim.method,
+                         parinit = parinit, control = list(trace = FALSE))
+   as_k <<- rlibkriging::KM(formula = formula, design = design,
+                            response = response, covtype = covtype,
+                            estim.method = estim.method,
+                            parinit = parinit)
+ 
+   ## print(k)
+   ## print(as_k)
+   ## if (e == "MLE") {
+   ##   plot(Vectorize(function(t) DiceKriging::logLikFun(t, k)[1]),
+   ##        xlim = c(0.0001, 2))
+   ## } else {
+   ##   plot(Vectorize(function(t) DiceKriging::leaveOneOutFun(t, k)[1]),
+   ##        xlim = c(0.0001, 2))
+   ## }
+   ## abline(v = k@covariance@range.val)
+   ## if (e == "MLE") {
+   ##   plot(Vectorize(function(t) rlibkriging::logLikelihoodFun(as_k@Kriging, t)$logLikelihood[1]),
+   ##        xlim = c(0.0001, 2), add = TRUE, col = 'red')
+   ## } else {
+   ##   plot(Vectorize(function(t) rlibkriging::leaveOneOutFun(as_k@Kriging, t)$leaveOneOut[1]),
+   ##        xlim = c(0.0001, 2), add = TRUE, col = 'red')
+   ## }
+   ## abline(v = as_k@covariance@range.val, col = 'red')
+ 
+   t <- runif(ncol(X))
+   test_that("DiceKriging::logLikFun == rlibkriging::logLikelihood",
+             expect_equal(DiceKriging::logLikFun(t, k)[1],
+                          rlibkriging::logLikelihoodFun(as_k@Kriging, t)$logLikelihood[1]))
+   test_that("DiceKriging::leaveOneOutFun == rlibkriging::leaveOneOut",
+             expect_equal(DiceKriging::leaveOneOutFun(t, k)[1],
+                          rlibkriging::leaveOneOutFun(as_k@Kriging, t)$leaveOneOut[1]))
+ 
+   x <- matrix(runif(d), ncol = d)
+   test_that("DiceKriging::predict == rlibkriging::predict",
+             expect_equal(DiceKriging::predict(k, newdata = x, type = "UK",
+                                               checkNames = FALSE)$mean[1],
+                          DiceKriging::predict(as_k, newdata = x, type = "UK")$mean[1],
+                          tol = 0.01))
+ 
+   n <- 100
+   set.seed(123)
+   sims_km2 <<- DiceKriging::simulate(k, nsim = n, newdata = x,
+                                      checkNames = FALSE, cond = TRUE,
+                                      nugget.sim = 1e-10)
+   sims_KM2 <<- DiceKriging::simulate(as_k, nsim = n, newdata = x,
+                                      checkNames = FALSE, cond = TRUE)
+   t <- t.test(t(sims_km2), sims_KM2, var.equal = FALSE, paired = FALSE)
+   print(t)
+   ## issue #100
+   test_that("DiceKriging::simulate ~= rlibkriging::simulate",
+             expect_true(t$p.value > 0.05))
+ }
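+ 
+ ## Illustrative sketch (not run): a single combination can be checked in
+ ## isolation before sweeping the whole feature matrix below, e.g.
+ ## test_args(formula = ~1, design = X, response = y,
+ ##           covtype = "gauss", estim.method = "MLE")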
+ 
+ ## Test the whole matrix of km features already available
+ for (f in c(~1, ~., ~.^2))
+   for (co in c("gauss", "exp", "matern3_2", "matern5_2"))
+     for (e in c("MLE", "LOO")) {
+       print(paste0("kernel:", co, " objective:", e,
+                    " trend:", paste0(f, collapse = "")))
+       test_args(formula = f, design = X,
+                 response = y, covtype = co, estim.method = e)
+     }
+ }

Attaching package: 'rlibkriging'

The following object is masked from 'package:DiceKriging':

    leaveOneOutFun

The following objects are masked from 'package:base':

    load, save

Test passed with 1 success 🥳.
Test passed with 1 success 🥳.
Preallocating 1 KModel structures (n=16, p=1)...
BFGS (start 1/1):
  objective: LL
  max iterations: 20
  null gradient tolerance: 0.001
  constant objective tolerance: 0.001
  reparametrize: 1
  normalize: 0
  lower_bounds: 1.0000e-10 1.0000e-10
  upper_bounds: 2.0000 2.0000
  start_point: 0.5000 1.0000
Best solution from start point 1 with objective: 76.2701
Test passed with 1 success 🎉.
Test passed with 1 success 😸.
Test passed with 1 success 🥇.
Test passed with 1 success 🌈.
Test passed with 1 success 🥳.
Test passed with 1 success 🥳.
Test passed with 1 success 🎉.
Test passed with 1 success 😸.
Test passed with 1 success 🌈.
Test passed with 1 success 🎉.

	Welch Two Sample t-test

data:  sims_km2 and sims_KM2
t = 0.16304, df = 1452.2, p-value = 0.8705
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -7.139511e-07  8.433887e-07
sample estimates:
mean of x mean of y 
0.4399124 0.4399124 

[1] "kernel:gauss objective:MLE trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.96181, df = 172.55, p-value = 0.3375
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.110476e-06  3.221274e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:gauss objective:LOO trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.94832, df = 180.23, p-value = 0.3442
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.154202e-06  3.290145e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554331 

Test passed with 1 success 🎉.
[1] "kernel:exp objective:MLE trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99057, df = 99, p-value = 0.3243
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.070133e-07  2.715438e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:exp objective:LOO trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99041, df = 99, p-value = 0.3244
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.071649e-07  2.715282e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:matern3_2 objective:MLE trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99041, df = 99, p-value = 0.3244
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.071654e-07  2.715283e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:matern3_2 objective:LOO trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.94776, df = 180.52, p-value = 0.3445
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.156057e-06  3.293027e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554331 

Test passed with 1 success 🎉.
[1] "kernel:matern5_2 objective:MLE trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99065, df = 99, p-value = 0.3243
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.069437e-07  2.715506e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:matern5_2 objective:LOO trend:~1"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.94752, df = 180.65, p-value = 0.3446
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.156851e-06  3.294259e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554331 

Test passed with 1 success 🎉.
[1] "kernel:gauss objective:MLE trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506198e-07  2.940637e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:gauss objective:LOO trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99083, df = 99.001, p-value = 0.3242
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.067782e-07  2.715676e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:exp objective:MLE trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506198e-07  2.940637e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:exp objective:LOO trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.96799, df = 168.56, p-value = 0.3344
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.090786e-06  3.189604e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:matern3_2 objective:MLE trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506221e-07  2.940642e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:matern3_2 objective:LOO trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99041, df = 99, p-value = 0.3244
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.071667e-07  2.715287e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:matern5_2 objective:MLE trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506198e-07  2.940637e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:matern5_2 objective:LOO trend:~."
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99041, df = 99, p-value = 0.3244
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.071657e-07  2.715284e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:gauss objective:MLE trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506198e-07  2.940637e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:gauss objective:LOO trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99083, df = 99.001, p-value = 0.3242
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.067782e-07  2.715676e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:exp objective:MLE trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506198e-07  2.940637e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:exp objective:LOO trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.96799, df = 168.56, p-value = 0.3344
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.090786e-06  3.189604e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:matern3_2 objective:MLE trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506221e-07  2.940642e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:matern3_2 objective:LOO trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99041, df = 99, p-value = 0.3244
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.071667e-07  2.715287e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
[1] "kernel:matern5_2 objective:MLE trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 1.0118, df = 130.03, p-value = 0.3135
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.506198e-07  2.940637e-06
sample estimates:
mean of x mean of y 
-0.255432 -0.255433 

Test passed with 1 success 🎉.
[1] "kernel:matern5_2 objective:LOO trend:~.^2"
Test passed with 1 success 😸.
Test passed with 1 success 🎉.
Test passed with 1 success 🥳.

	Welch Two Sample t-test

data:  t(sims_km2) and sims_KM2
t = 0.99041, df = 99, p-value = 0.3244
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -9.071657e-07  2.715284e-06
sample estimates:
 mean of x  mean of y 
-0.2554320 -0.2554329 

Test passed with 1 success 🎉.
> 
> proc.time()
   user  system elapsed 
   8.39    1.00    9.42 