# ═══════════════════════════════════════════════════════════
# Tests for boost_trees(), forest_trees(), benchmark(),
# cv_score(), tune_sarb(), compare(), sensitivity()
# ═══════════════════════════════════════════════════════════
# NOTE(review): the header mentions sensitivity(), but no test below
# covers it — TODO: add one or drop it from the header.

# ── Test data ──

# Build a small reproducible regression problem: y is a linear function of
# the first two predictors plus Gaussian noise, so any sensible learner can
# recover signal. Returns the design matrix (`x`), the response (`y`), and
# a combined data.frame (`df`) for the formula interfaces.
make_test_data <- function(n = 100, p = 5, seed = 42) {
  set.seed(seed)
  X <- matrix(rnorm(n * p), n, p)
  colnames(X) <- paste0("V", seq_len(p))
  y <- X[, 1] * 2 + X[, 2] * -1 + rnorm(n, sd = 0.5)
  list(x = X, y = y, df = data.frame(X, y = y))
}

# ── boost_trees ──

test_that("boost_trees fits sarb method", {
  d <- make_test_data()
  model <- boost_trees(x = d$x, y = d$y, method = "sarb",
                       n_trees = 30, verbose = 0)
  expect_s3_class(model, "ensemble_model")
  preds <- predict(model, d$x)
  expect_length(preds, nrow(d$x))
  # Mirror the gbm test: predictions must be finite, not just right-sized.
  expect_true(all(is.finite(preds)))
})

test_that("boost_trees fits gbm method", {
  d <- make_test_data()
  model <- boost_trees(x = d$x, y = d$y, method = "gbm",
                       n_trees = 30, verbose = 0)
  expect_s3_class(model, "ensemble_model")
  preds <- predict(model, d$x)
  expect_length(preds, nrow(d$x))
  expect_true(all(is.finite(preds)))
})

test_that("boost_trees rejects invalid method", {
  d <- make_test_data()
  expect_error(boost_trees(x = d$x, y = d$y, method = "invalid"),
               "Unknown method")
})

test_that("boost_trees formula interface works", {
  d <- make_test_data()
  model <- boost_trees(y ~ ., data = d$df, method = "sarb",
                       n_trees = 30, verbose = 0)
  expect_s3_class(model, "ensemble_model")
})

# ── forest_trees ──

test_that("forest_trees fits rf method", {
  skip_if_not_installed("ranger")
  d <- make_test_data()
  model <- forest_trees(x = d$x, y = d$y, method = "rf",
                        n_trees = 50, verbose = 0)
  expect_s3_class(model, "ensemble_model")
  preds <- predict(model, d$x)
  expect_length(preds, nrow(d$x))
})

test_that("forest_trees fits extratrees method", {
  skip_if_not_installed("ranger")
  d <- make_test_data()
  model <- forest_trees(x = d$x, y = d$y, method = "extratrees",
                        n_trees = 50, verbose = 0)
  expect_s3_class(model, "ensemble_model")
})

# ── cv_score ──

test_that("cv_score returns expected structure", {
  d <- make_test_data()
  result <- cv_score(y ~ ., data = d$df, method = "sarb",
                     cv_folds = 3, n_trees = 30, verbose = FALSE)
  expect_true(is.list(result))
  expect_true("rmse" %in% names(result))
  expect_true("mae" %in% names(result))
  expect_true("r2" %in% names(result))
  expect_true(result$rmse > 0)
})

# ── benchmark ──

test_that("benchmark runs with multiple methods", {
  d <- make_test_data()
  result <- benchmark(y ~ ., data = d$df, methods = c("sarb", "gbm"),
                      cv_folds = 2, n_trees = 20, verbose = FALSE)
  expect_s3_class(result, "sarb_benchmark")
  # One summary row per requested method; expect_identical gives a clearer
  # failure message than expect_true(nrow(...) == 2).
  expect_identical(nrow(result$summary), 2L)
  expect_true("rmse" %in% names(result$summary))
})

# ── tune_sarb ──

test_that("tune_sarb finds best params", {
  d <- make_test_data()
  result <- tune_sarb(y ~ ., data = d$df,
                      param_grid = list(warmup_frac = c(0.1, 0.5)),
                      cv_folds = 2, n_trees = 20, verbose = FALSE)
  expect_true(is.list(result))
  expect_true("best_params" %in% names(result))
  expect_true("best_score" %in% names(result))
  expect_true(result$best_score > 0)
  # Winner must come from the supplied grid.
  expect_true(result$best_params$warmup_frac %in% c(0.1, 0.5))
})

# ── compare ──

test_that("compare works with two models", {
  d <- make_test_data()
  m1 <- boost_trees(x = d$x, y = d$y, method = "sarb",
                    n_trees = 30, verbose = 0)
  m2 <- boost_trees(x = d$x, y = d$y, method = "gbm",
                    n_trees = 30, verbose = 0)
  result <- compare(m1, m2, newdata = d$x, y_true = d$y)
  expect_true(is.list(result))
  expect_length(result$scores, 2)
})

# ── which_method ──

test_that("which_method returns recommendations", {
  result <- which_method(goal = "accuracy")
  expect_equal(result, "sarb")
  result <- which_method(goal = "speed")
  expect_equal(result, "lightgbm")
})