# Tests for f_meas() / f_meas_vec(): hand-computed values, interface parity,
# NA handling, case weights, sklearn equivalence, and undefined-result
# propagation from precision()/recall().

test_that("Calculations are correct - two class", {
  # Reference value taken from the Powers paper fixtures
  lst <- data_powers()
  df_2_1 <- lst$df_2_1

  expect_equal(
    f_meas_vec(truth = df_2_1$truth, estimate = df_2_1$prediction),
    0.5882353,
    tolerance = 0.0001
  )
})

test_that("Calculations are correct - three class", {
  multi_ex <- data_three_by_three()

  expect_equal(
    f_meas(multi_ex, estimator = "macro")[[".estimate"]],
    macro_metric(f_meas_binary)
  )
  expect_equal(
    f_meas(multi_ex, estimator = "macro_weighted")[[".estimate"]],
    macro_weighted_metric(f_meas_binary)
  )
  # Micro average reduces to a known closed-form value for this fixture
  expect_equal(
    f_meas(multi_ex, estimator = "micro")[[".estimate"]],
    0.6
  )
})

test_that("All interfaces gives the same results", {
  lst <- data_altman()
  pathology <- lst$pathology
  path_tbl <- lst$path_tbl
  path_mat <- unclass(path_tbl)

  # Vector interface is the reference; table, matrix, and data frame
  # interfaces must agree exactly
  exp <- f_meas_vec(pathology$pathology, pathology$scan)

  expect_identical(
    f_meas(path_tbl)[[".estimate"]],
    exp
  )
  expect_identical(
    f_meas(path_mat)[[".estimate"]],
    exp
  )
  expect_identical(
    f_meas(pathology, truth = pathology, estimate = scan)[[".estimate"]],
    exp
  )
})

test_that("Calculations handles NAs", {
  # Powers paper fixture with NAs injected into the predictions
  lst <- data_powers()
  df_2_1 <- lst$df_2_1

  expect_equal(
    f_meas(df_2_1, truth = truth, estimate = pred_na)[[".estimate"]],
    0.5652174,
    tolerance = 0.0001
  )
})

test_that("Case weights calculations are correct", {
  # Hand-computable weighted binary case
  df <- data.frame(
    truth = factor(c("x", "x", "y"), levels = c("x", "y")),
    estimate = factor(c("x", "y", "x"), levels = c("x", "y")),
    case_weights = c(1L, 1L, 2L)
  )

  expect_identical(
    f_meas_vec(
      truth = df$truth,
      estimate = df$estimate,
      case_weights = df$case_weights
    ),
    2 / 5
  )

  # Weighted results must match sklearn's reference values
  py_res <- read_pydata("py-f_meas")
  py_res_beta_half <- read_pydata("py-f_meas_beta_.5")
  r_metric <- f_meas

  two_class_example$weights <- read_weights_two_class_example()

  expect_equal(
    r_metric(two_class_example, truth, predicted, case_weights = weights)[[
      ".estimate"
    ]],
    py_res$case_weight$binary
  )
  expect_equal(
    r_metric(
      two_class_example,
      truth,
      predicted,
      case_weights = weights,
      beta = 0.5
    )[[".estimate"]],
    py_res_beta_half$case_weight$binary
  )

  hpc_cv$weights <- read_weights_hpc_cv()

  expect_equal(
    r_metric(hpc_cv, obs, pred, case_weights = weights)[[".estimate"]],
    py_res$case_weight$macro
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, estimator = "micro", case_weights = weights)[[
      ".estimate"
    ]],
    py_res$case_weight$micro
  )
  expect_equal(
    r_metric(
      hpc_cv,
      obs,
      pred,
      estimator = "macro_weighted",
      case_weights = weights
    )[[".estimate"]],
    py_res$case_weight$weighted
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, beta = 0.5, case_weights = weights)[[
      ".estimate"
    ]],
    py_res_beta_half$case_weight$macro
  )
  expect_equal(
    r_metric(
      hpc_cv,
      obs,
      pred,
      estimator = "micro",
      beta = 0.5,
      case_weights = weights
    )[[".estimate"]],
    py_res_beta_half$case_weight$micro
  )
  expect_equal(
    r_metric(
      hpc_cv,
      obs,
      pred,
      estimator = "macro_weighted",
      beta = 0.5,
      case_weights = weights
    )[[".estimate"]],
    py_res_beta_half$case_weight$weighted
  )
})

test_that("work with class_pred input", {
  skip_if_not_installed("probably")

  # class_pred equivocal values should behave like NA in the factor input
  cp_truth <- probably::as_class_pred(two_class_example$truth, which = 1)
  cp_estimate <- probably::as_class_pred(two_class_example$predicted, which = 2)

  fct_truth <- two_class_example$truth
  fct_truth[1] <- NA

  fct_estimate <- two_class_example$predicted
  fct_estimate[2] <- NA

  expect_identical(
    f_meas_vec(fct_truth, cp_estimate),
    f_meas_vec(fct_truth, fct_estimate)
  )
  expect_identical(
    f_meas_vec(fct_truth, cp_estimate, na_rm = FALSE),
    NA_real_
  )
  # class_pred truth is not supported and should error
  expect_snapshot(
    error = TRUE,
    f_meas_vec(cp_truth, cp_estimate)
  )
})

test_that("works with hardhat case weights", {
  lst <- data_altman()
  df <- lst$pathology

  imp_wgt <- hardhat::importance_weights(seq_len(nrow(df)))
  freq_wgt <- hardhat::frequency_weights(seq_len(nrow(df)))

  expect_no_error(
    f_meas_vec(df$pathology, df$scan, case_weights = imp_wgt)
  )
  expect_no_error(
    f_meas_vec(df$pathology, df$scan, case_weights = freq_wgt)
  )
})

test_that("na_rm argument check", {
  expect_snapshot(
    error = TRUE,
    f_meas_vec(1, 1, na_rm = "yes")
  )
})

test_that("sklearn equivalent", {
  py_res <- read_pydata("py-f_meas")
  py_res_beta_half <- read_pydata("py-f_meas_beta_.5")
  r_metric <- f_meas

  # Binary
  expect_equal(
    r_metric(two_class_example, truth, predicted)[[".estimate"]],
    py_res$binary
  )
  expect_equal(
    r_metric(two_class_example, truth, predicted, beta = 0.5)[[".estimate"]],
    py_res_beta_half$binary
  )

  # Multiclass
  expect_equal(
    r_metric(hpc_cv, obs, pred)[[".estimate"]],
    py_res$macro
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, estimator = "micro")[[".estimate"]],
    py_res$micro
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, estimator = "macro_weighted")[[".estimate"]],
    py_res$weighted
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, beta = 0.5)[[".estimate"]],
    py_res_beta_half$macro
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, estimator = "micro", beta = 0.5)[[
      ".estimate"
    ]],
    py_res_beta_half$micro
  )
  expect_equal(
    r_metric(hpc_cv, obs, pred, estimator = "macro_weighted", beta = 0.5)[[
      ".estimate"
    ]],
    py_res_beta_half$weighted
  )
})

test_that("`event_level = 'second'` works", {
  lst <- data_powers()
  df <- lst$df_2_1

  # Releveling the factors and flipping `event_level` must be a no-op
  df_rev <- df
  df_rev$truth <- stats::relevel(df_rev$truth, "Irrelevant")
  df_rev$prediction <- stats::relevel(df_rev$prediction, "Irrelevant")

  expect_equal(
    f_meas_vec(df$truth, df$prediction),
    f_meas_vec(df_rev$truth, df_rev$prediction, event_level = "second")
  )
})

test_that("`NA` values propagate from binary `precision()` (#77)", {
  # Nothing predicted as the event level -> precision is undefined
  truth <- factor(c(rep("a", 2), rep("b", 2)))
  estimate <- factor(rep("b", length(truth)), levels(truth))

  expect_snapshot({
    out <- precision_vec(truth, estimate)
    expect <- f_meas_vec(truth, estimate)
  })
  expect_identical(out, expect)
})

test_that("`NA` values propagate from binary `recall()` (#77)", {
  # No observations at the event level -> recall is undefined
  estimate <- factor(c(rep("a", 2), rep("b", 2)))
  truth <- factor(rep("b", length(estimate)), levels(estimate))

  expect_snapshot({
    out <- recall_vec(truth, estimate)
    expect <- f_meas_vec(truth, estimate)
  })
  expect_identical(out, expect)
})

test_that("Binary returns `NA` with a warning when results are undefined (#98)", {
  # Recall undefined - (tp + fn = 0)
  levels <- c("a", "b")
  truth <- factor(c("b", "b"), levels = levels)
  estimate <- factor(c("a", "b"), levels = levels)

  expect_snapshot(
    out <- f_meas_vec(truth, estimate)
  )
  expect_identical(out, NA_real_)

  # Precision undefined - (tp + fp = 0)
  levels <- c("a", "b")
  truth <- factor("a", levels = levels)
  estimate <- factor("b", levels = levels)

  expect_snapshot(
    out <- f_meas_vec(truth, estimate)
  )
  expect_identical(out, NA_real_)
})

test_that("Multiclass returns averaged value a warning when results is undefined (#98)", {
  # Recall undefined for one class - (tp + fn = 0); the defined classes
  # are still averaged
  levels <- c("a", "b", "c")
  truth <- factor(c("a", "b", "b"), levels = levels)
  estimate <- factor(c("a", "b", "c"), levels = levels)

  expect_snapshot(
    out <- f_meas_vec(truth, estimate)
  )
  expect_identical(out, 5 / 6)
})

test_that("`NA` is still returned if there are some undefined values but `na.rm = FALSE`", {
  levels <- c("a", "b", "c")
  truth <- factor(c("a", "b", "b"), levels = levels)
  estimate <- factor(c("a", NA, "c"), levels = levels)

  expect_equal(f_meas_vec(truth, estimate, na_rm = FALSE), NA_real_)
  # NA must be returned silently, without the undefined-result warning
  expect_no_warning(f_meas_vec(truth, estimate, na_rm = FALSE))
})

test_that("bad argument check", {
  expect_snapshot(
    error = TRUE,
    f_meas_vec(1, 1, beta = "yes")
  )
})

test_that("range values are correct", {
  direction <- metric_direction(f_meas)
  range <- metric_range(f_meas)

  # Scalar condition, so plain if/else is preferred over ifelse()
  if (direction == "minimize") {
    perfect <- range[1]
    worst <- range[2]
  } else {
    perfect <- range[2]
    worst <- range[1]
  }

  df <- tibble::tibble(
    truth = factor(c("A", "A", "B", "B", "B")),
    off = factor(c("B", "B", "A", "A", "A"))
  )

  expect_equal(
    f_meas_vec(df$truth, df$truth),
    perfect
  )

  if (direction == "minimize") {
    expect_gt(f_meas_vec(df$truth, df$off), perfect)
    expect_lte(f_meas_vec(df$truth, df$off), worst)
  }
  if (direction == "maximize") {
    expect_lt(f_meas_vec(df$truth, df$off), perfect)
    expect_gte(f_meas_vec(df$truth, df$off), worst)
  }
})