library(testthat)
library(PRNG)

# Uniformity Test
test_that("Uniformity Test", {
  random_numbers <- runf(n = 100)
  ks_result <- ks.test(random_numbers, "punif", 0, 1)
  expect_true(ks_result$p.value > 0.05,
              info = "Generated random numbers do not follow a uniform distribution.")
})

# Independence Test
test_that("Independence Test", {
  random_numbers <- runf(n = 100)
  autocorr_result <- acf(random_numbers, plot = FALSE)
  # acf[1] is the lag-0 autocorrelation (always 1); acf[2] is the lag-1 value.
  # Use the conventional 95% bound of 2/sqrt(n) rather than a fixed 0.05, which
  # is well inside one standard error (1/sqrt(100) = 0.1) for a sample of 100.
  expect_true(abs(autocorr_result$acf[2]) < 2 / sqrt(length(random_numbers)),
              info = "Consecutive random numbers are not independent.")
})

# Seed Sensitivity Test
test_that("Seed Sensitivity Test", {
  seed1 <- runf(n = 100, x00 = 0.1)
  seed2 <- runf(n = 100, x00 = 0.1001)
  expect_false(identical(seed1, seed2),
               info = "Small changes in seed do not result in different sequences.")
})

# Performance Test
test_that("Performance Test", {
  start_time <- Sys.time()
  runf(n = 100000)
  end_time <- Sys.time()
  expect_true(difftime(end_time, start_time, units = "secs") < 1,
              info = "Random number generation is too slow.")
})

# Edge Cases Test
test_that("Edge Cases Test", {
  # expect_error()'s second positional argument is a regexp matched against the
  # error message, so the descriptive text is passed via `info` instead.
  expect_error(runf(n = -10),
               info = "Negative number of random numbers should throw an error.")
  expect_error(runf(n = 0),
               info = "Zero number of random numbers should throw an error.")
})

# Bit-Level Tests
test_that("Bit-Level Tests", {
  bits <- rbits(100)
  expect_true(all(bits %in% c(0, 1)),
              info = "Generated bits are not binary.")
  # For 100 fair-coin bits the standard error of the mean is 0.05, so a
  # tolerance of 0.1 (about two standard errors) keeps the test stable.
  expect_true(abs(mean(bits) - 0.5) < 0.1,
              info = "Generated bits are not balanced between 0 and 1.")
})

# Distribution Tests (Normal)
test_that("Distribution Test - Normal", {
  normal_numbers <- rnorm(n = 100)
  shapiro_test <- shapiro.test(normal_numbers)
  expect_true(shapiro_test$p.value > 0.05,
              info = "Generated numbers do not follow a normal distribution.")
})

# Boundary Tests
test_that("Boundary Tests", {
  expect_error(runf(n = 100, x00 = -1),
               info = "Seed value out of range should throw an error.")
  expect_error(runf(n = 100, a1 = 5),
               info = "Parameter out of range should throw an error.")
})
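
# Minimal sketch of running the suite above interactively, outside R CMD check.
# The file name "test-prng.R" is only illustrative; point test_file() at
# wherever these tests are actually saved.
library(testthat)
test_file("test-prng.R")

# Inside a package checkout, devtools::test() would instead run everything
# under tests/testthat/.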