diff --git a/DESCRIPTION b/DESCRIPTION
index dece4db5..e7a6b9e8 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -64,7 +64,8 @@ Suggests:
     rpart,
     stringi,
     testthat (>= 3.0.0)
-Remotes: mlr-org/bbotk
+Remotes:
+    mlr-org/mlr3
 ByteCompile: no
 Encoding: UTF-8
 Config/testthat/edition: 3
diff --git a/R/mbo_defaults.R b/R/mbo_defaults.R
index 69c9664d..e6bcb7f1 100644
--- a/R/mbo_defaults.R
+++ b/R/mbo_defaults.R
@@ -161,14 +161,13 @@ default_surrogate = function(instance, learner = NULL, n_learner = NULL) {
       default_rf(noisy)
     }
     # stability: evaluate and add a fallback
-    learner$encapsulate[c("train", "predict")] = "evaluate"
     require_namespaces("ranger")
     fallback = mlr3learners::LearnerRegrRanger$new()
     fallback$param_set$values = insert_named(
       fallback$param_set$values,
       list(num.trees = 10L, keep.inbag = TRUE, se.method = "jack")
     )
-    learner$fallback = fallback
+    learner$encapsulate("evaluate", fallback)

     if (has_deps) {
       require_namespaces("mlr3pipelines")
@@ -184,8 +183,7 @@ default_surrogate = function(instance, learner = NULL, n_learner = NULL) {
           learner
         )
       )
-      learner$encapsulate[c("train", "predict")] = "evaluate"
-      learner$fallback = LearnerRegrFeatureless$new()
+      learner$encapsulate("evaluate", lrn("regr.featureless"))
     }
   }

diff --git a/tests/testthat/helper.R b/tests/testthat/helper.R
index 6acb90d0..41a886c8 100644
--- a/tests/testthat/helper.R
+++ b/tests/testthat/helper.R
@@ -101,13 +101,13 @@ MAKE_DESIGN = function(instance, n = 4L) {

 if (requireNamespace("mlr3learners") && requireNamespace("DiceKriging") && requireNamespace("rgenoud")) {
   library(mlr3learners)
-  REGR_KM_NOISY = lrn("regr.km", covtype = "matern3_2", optim.method = "gen", control = list(trace = FALSE, max.generations = 2), nugget.estim = TRUE, jitter = 1e-12)
-  REGR_KM_NOISY$encapsulate = c(train = "callr", predict = "callr")
-  REGR_KM_DETERM = lrn("regr.km", covtype = "matern3_2", optim.method = "gen", control = list(trace = FALSE, max.generations = 2), nugget.stability = 10^-8)
-  REGR_KM_DETERM$encapsulate = c(train = "callr", predict = "callr")
+  REGR_KM_NOISY = lrn("regr.km", covtype = "matern3_2", optim.method = "gen", control = list(trace = FALSE), nugget.estim = TRUE, jitter = 1e-12)
+  REGR_KM_NOISY$encapsulate("callr", lrn("regr.featureless"))
+  REGR_KM_DETERM = lrn("regr.km", covtype = "matern3_2", optim.method = "gen", control = list(trace = FALSE), nugget.stability = 10^-8)
+  REGR_KM_DETERM$encapsulate("callr", lrn("regr.featureless"))
 }

 REGR_FEATURELESS = lrn("regr.featureless")
-REGR_FEATURELESS$encapsulate = c(train = "callr", predict = "callr")
+REGR_FEATURELESS$encapsulate("callr", lrn("regr.featureless"))

 OptimizerError = R6Class("OptimizerError", inherit = OptimizerBatch,
diff --git a/tests/testthat/test_bayesopt_ego.R b/tests/testthat/test_bayesopt_ego.R
index ce2081a5..78cb4c11 100644
--- a/tests/testthat/test_bayesopt_ego.R
+++ b/tests/testthat/test_bayesopt_ego.R
@@ -109,7 +109,7 @@ test_that("stable bayesopt_ego", {
   expect_true(nrow(instance$archive$data) == 5L)
   expect_number(acq_function$surrogate$assert_insample_perf, upper = 1)
   lines = readLines(f)
-  # expect_true(sum(grepl("Optimizer Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L)
+  expect_true(sum(grepl("Optimizer Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L)
   expect_true(sum(grepl("Proposing a randomly sampled point", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 2L)

   # Surrogate using LearnerRegrError as Learner that will fail during train
diff --git a/tests/testthat/test_mbo_defaults.R b/tests/testthat/test_mbo_defaults.R
index 7cef2365..1c0fc4ba 100644
--- a/tests/testthat/test_mbo_defaults.R
+++ b/tests/testthat/test_mbo_defaults.R
@@ -23,7 +23,7 @@ test_that("default_surrogate", {
   expect_r6(surrogate$learner, "LearnerRegrKM")
   expect_equal_sorted(surrogate$learner$param_set$values,
     list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.stability = 1e-08))
-  expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner$fallback, "LearnerRegrRanger")

   # singlecrit all numeric, noisy
@@ -32,7 +32,7 @@ test_that("default_surrogate", {
   expect_r6(surrogate$learner, "LearnerRegrKM")
   expect_equal_sorted(surrogate$learner$param_set$values,
     list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.estim = TRUE, jitter = 1e-12))
-  expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner$fallback, "LearnerRegrRanger")

   # twocrit all numeric, deterministic
@@ -41,10 +41,10 @@ test_that("default_surrogate", {
   expect_list(surrogate$learner, types = "LearnerRegrKM")
   expect_equal_sorted(surrogate$learner[[1L]]$param_set$values,
     list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.stability = 1e-08))
-  expect_equal(surrogate$learner[[1L]]$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner[[1L]]$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner[[1L]]$fallback, "LearnerRegrRanger")
   expect_equal(surrogate$learner[[1L]]$param_set$values, surrogate$learner[[2L]]$param_set$values)
-  expect_equal(surrogate$learner[[1L]]$encapsulate, surrogate$learner[[2L]]$encapsulate)
+  expect_equal(surrogate$learner[[1L]]$encapsulation, surrogate$learner[[2L]]$encapsulation)
   expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback)

   # twocrit all numeric, noisy
@@ -53,10 +53,10 @@ test_that("default_surrogate", {
   expect_list(surrogate$learner, types = "LearnerRegrKM")
   expect_equal_sorted(surrogate$learner[[1L]]$param_set$values,
     list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.estim = TRUE, jitter = 1e-12))
-  expect_equal(surrogate$learner[[1L]]$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner[[1L]]$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner[[1L]]$fallback, "LearnerRegrRanger")
   expect_equal(surrogate$learner[[1L]]$param_set$values, surrogate$learner[[2L]]$param_set$values)
-  expect_equal(surrogate$learner[[1L]]$encapsulate, surrogate$learner[[2L]]$encapsulate)
+  expect_equal(surrogate$learner[[1L]]$encapsulation, surrogate$learner[[2L]]$encapsulation)
   expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback)

   # singlecrit mixed input
@@ -65,7 +65,7 @@ test_that("default_surrogate", {
   expect_r6(surrogate$learner, "LearnerRegrRanger")
   expect_equal_sorted(surrogate$learner$param_set$values,
     list(num.threads = 1L, num.trees = 100L, keep.inbag = TRUE, se.method = "jack"))
-  expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner$fallback, "LearnerRegrRanger")

   # twocrit mixed input
@@ -74,10 +74,10 @@ test_that("default_surrogate", {
   expect_list(surrogate$learner, types = "LearnerRegrRanger")
   expect_equal_sorted(surrogate$learner[[1L]]$param_set$values,
     list(num.threads = 1L, num.trees = 100L, keep.inbag = TRUE, se.method = "jack"))
-  expect_equal(surrogate$learner[[1L]]$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner[[1L]]$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner[[1L]]$fallback, "LearnerRegrRanger")
   expect_equal(surrogate$learner[[1L]]$param_set$values, surrogate$learner[[2L]]$param_set$values)
-  expect_equal(surrogate$learner[[1L]]$encapsulate, surrogate$learner[[2L]]$encapsulate)
+  expect_equal(surrogate$learner[[1L]]$encapsulation, surrogate$learner[[2L]]$encapsulation)
   expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback)

   # singlecrit mixed input deps
@@ -152,12 +152,11 @@ test_that("stability and defaults", {
   # this should trigger a mbo_error
   instance = MAKE_INST_1D(terminator = trm("evals", n_evals = 5L))
   learner = LearnerRegrError$new()
-  learner$encapsulate[c("train", "predict")] = "evaluate"
-  learner$fallback = lrn("regr.ranger", num.trees = 10L, keep.inbag = TRUE, se.method = "jack")
+  learner$encapsulate("evaluate", lrn("regr.ranger", num.trees = 10L, keep.inbag = TRUE, se.method = "jack"))
   surrogate = default_surrogate(instance, learner = learner, n_learner = 1L)
   expect_r6(surrogate, "SurrogateLearner")
   expect_r6(surrogate$learner, "LearnerRegrError")
-  expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate"))
+  expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate"))
   expect_r6(surrogate$learner$fallback, "LearnerRegrRanger")
   acq_function = default_acqfunction(instance)
   expect_r6(acq_function, "AcqFunctionEI")
@@ -172,17 +171,5 @@ test_that("stability and defaults", {

   lines = readLines(f)
   # Nothing should happen here due to the fallback learner
   expect_true(sum(grepl("Surrogate Train Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 0L)
-
-  acq_function$surrogate$learner$reset()
-  acq_function$surrogate$learner$fallback = NULL
-  instance$archive$clear()
-  bayesopt_ego(instance, surrogate = surrogate, acq_function = acq_function, acq_optimizer = acq_optimizer)
-  expect_true(nrow(instance$archive$data) == 5L)
-  lines = readLines(f)
-  # Training fails but this error is not logged due to the "evaluate" encapsulate
-  expect_equal(acq_function$surrogate$learner$errors, "Surrogate Train Error.")
-  expect_true(sum(grepl("Surrogate Train Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 0L)
-  expect_true(sum(grepl("Cannot predict", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L)
-  expect_true(sum(grepl("Proposing a randomly sampled point", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L)
 })