diff --git a/.lintr b/.lintr index 436e1b52..6fa3c0ff 100644 --- a/.lintr +++ b/.lintr @@ -5,5 +5,6 @@ linters: linters_with_defaults( object_name_linter = object_name_linter(c("snake_case", "CamelCase")), # only allow snake case and camel case object names cyclocomp_linter = NULL, # do not check function complexity commented_code_linter = NULL, # allow code in comments - line_length_linter = line_length_linter(120) + line_length_linter = line_length_linter(120), + indentation_linter(indent = 2L, hanging_indent_style = "never") ) diff --git a/DESCRIPTION b/DESCRIPTION index 48499663..f146ca46 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -41,13 +41,13 @@ BugReports: https://github.com/mlr-org/mlr3mbo/issues Depends: R (>= 3.1.0) Imports: - bbotk (>= 1.0.0), + bbotk (>= 1.1.0), checkmate (>= 2.0.0), data.table, lgr (>= 0.3.4), - mlr3 (>= 0.14.0), + mlr3 (>= 0.20.2.9000), mlr3misc (>= 0.11.0), - mlr3tuning (>= 1.0.0), + mlr3tuning (>= 1.0.1), paradox (>= 1.0.0), spacefillr, R6 (>= 2.4.1) @@ -64,9 +64,11 @@ Suggests: rgenoud, rmarkdown, rpart, + rush, stringi, testthat (>= 3.0.0) -Remotes: mlr-org/bbotk +Remotes: + mlr-org/mlr3 ByteCompile: no Encoding: UTF-8 Config/testthat/edition: 3 @@ -90,6 +92,7 @@ Collate: 'AcqFunctionSmsEgo.R' 'AcqOptimizer.R' 'aaa.R' + 'OptimizerAsyncMbo.R' 'OptimizerMbo.R' 'mlr_result_assigners.R' 'ResultAssigner.R' @@ -97,7 +100,9 @@ Collate: 'ResultAssignerSurrogate.R' 'Surrogate.R' 'SurrogateLearner.R' + 'SurrogateLearnerAsync.R' 'SurrogateLearnerCollection.R' + 'TunerAsyncMbo.R' 'TunerMbo.R' 'mlr_loop_functions.R' 'bayesopt_ego.R' @@ -109,6 +114,7 @@ Collate: 'helper.R' 'loop_function.R' 'mbo_defaults.R' + 'mlr_callbacks.R' 'sugar.R' 'zzz.R' VignetteBuilder: knitr diff --git a/NAMESPACE b/NAMESPACE index 9b8e3b96..a12ea3ba 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -17,13 +17,16 @@ export(AcqFunctionPI) export(AcqFunctionSD) export(AcqFunctionSmsEgo) export(AcqOptimizer) +export(OptimizerAsyncMbo) export(OptimizerMbo) export(ResultAssigner) export(ResultAssignerArchive) export(ResultAssignerSurrogate) export(Surrogate) export(SurrogateLearner) +export(SurrogateLearnerAsync) export(SurrogateLearnerCollection) +export(TunerAsyncMbo) export(TunerMbo) export(acqf) export(acqfs) @@ -58,6 +61,7 @@ importFrom(R6,R6Class) importFrom(stats,dnorm) importFrom(stats,pnorm) importFrom(stats,quantile) +importFrom(stats,rexp) importFrom(stats,runif) importFrom(stats,setNames) importFrom(utils,bibentry) diff --git a/R/AcqFunctionCB.R b/R/AcqFunctionCB.R index 81328e05..358d62f6 100644 --- a/R/AcqFunctionCB.R +++ b/R/AcqFunctionCB.R @@ -64,17 +64,25 @@ AcqFunctionCB = R6Class("AcqFunctionCB", assert_r6(surrogate, "SurrogateLearner", null.ok = TRUE) assert_number(lambda, lower = 0, finite = TRUE) - constants = ps(lambda = p_dbl(lower = 0, default = 2)) + constants = ps( + lambda = p_dbl(lower = 0, default = 2) + ) constants$values$lambda = lambda - super$initialize("acq_cb", constants = constants, surrogate = surrogate, requires_predict_type_se = TRUE, direction = "same", label = "Lower / Upper Confidence Bound", man = "mlr3mbo::mlr_acqfunctions_cb") + super$initialize("acq_cb", + constants = constants, + surrogate = surrogate, + requires_predict_type_se = TRUE, + direction = "same", + label = "Lower / Upper Confidence Bound", + man = "mlr3mbo::mlr_acqfunctions_cb") } ), private = list( - .fun = function(xdt, ...) { - constants = list(...) - lambda = constants$lambda + .fun = function(xdt, lambda) { + #constants = list(...) + #lambda = constants$lambda p = self$surrogate$predict(xdt) cb = p$mean - self$surrogate_max_to_min * lambda * p$se data.table(acq_cb = cb) diff --git a/R/OptimizerAsyncMbo.R b/R/OptimizerAsyncMbo.R new file mode 100644 index 00000000..2a9c67f9 --- /dev/null +++ b/R/OptimizerAsyncMbo.R @@ -0,0 +1,203 @@ +#' @title Asynchronous Model Based Optimization +#' +#' @name mlr_optimizers_async_mbo +#' +#' @description +#' `OptimizerAsyncMbo` class that implements asynchronous Model Based Optimization (MBO). +#' +#' @section Parameters: +#' \describe{ +#' \item{`initial_design`}{`data.table::data.table()`\cr +#' Initial design of the optimization. +#' If `NULL`, a design of size `design_size` is generated with `design_function`.} +#' \item{`design_size`}{`integer(1)`\cr +#' Size of the initial design.} +#' \item{`design_function`}{`character(1)`\cr +#' Function to generate the initial design. +#' One of `c("random", "sobol", "lhs")`.} +#' \item{`n_workers`}{`integer(1)`\cr +#' Number of parallel workers. +#' If `NULL`, all rush workers set with [rush::rush_plan()] are used.} +#' } +#' +#' @template param_surrogate +#' @template param_acq_function +#' @template param_acq_optimizer +#' +#' @param param_set [paradox::ParamSet]\cr +#' Set of control parameters. +#' +#' @export +OptimizerAsyncMbo = R6Class("OptimizerAsyncMbo", + inherit = OptimizerAsync, + + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function(surrogate = NULL, acq_function = NULL, acq_optimizer = NULL, param_set = NULL) { + default_param_set = ps( + initial_design = p_uty(), + design_size = p_int(lower = 1, default = 10), + design_function = p_fct(c("random", "sobol", "lhs"), default = "sobol"), + n_workers = p_int(lower = 1L) + ) + param_set = c(default_param_set, param_set) + + param_set$set_values(design_size = 10, design_function = "sobol") + + super$initialize("async_mbo", + param_set = param_set, + param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"), + properties = c("dependencies", "single-crit"), + packages = c("mlr3mbo", "rush"), + label = "Asynchronous Model Based Optimization", + man = "mlr3mbo::OptimizerAsyncMbo") + + self$surrogate = assert_r6(surrogate, classes = "Surrogate", null.ok = TRUE) + self$acq_function = assert_r6(acq_function, classes = "AcqFunction", null.ok = TRUE) + self$acq_optimizer = assert_r6(acq_optimizer, classes = "AcqOptimizer", null.ok = TRUE) + }, + + + #' @description + #' Performs the optimization on a [OptimInstanceAsyncSingleCrit] or [OptimInstanceAsyncMultiCrit] until termination. + #' The single evaluations will be written into the [ArchiveAsync]. + #' The result will be written into the instance object. + #' + #' @param inst ([OptimInstanceAsyncSingleCrit] | [OptimInstanceAsyncMultiCrit]). + #' + #' @return [data.table::data.table()] + optimize = function(inst) { + pv = self$param_set$values + + # initial design + design = if (is.null(pv$initial_design)) { + # generate initial design + generate_design = switch(pv$design_function, + "random" = generate_design_random, + "sobol" = generate_design_sobol, + "lhs" = generate_design_lhs) + + lg$debug("Generating sobol design with size %s", pv$design_size) + generate_design(inst$search_space, n = pv$design_size)$data + } else { + # use provided initial design + lg$debug("Using provided initial design with size %s", nrow(pv$initial_design)) + pv$initial_design + } + optimize_async_default(inst, self, design, n_workers = pv$n_workers) + } + ), + + active = list( + #' @template field_surrogate + surrogate = function(rhs) { + if (missing(rhs)) { + private$.surrogate + } else { + private$.surrogate = assert_r6(rhs, classes = "Surrogate", null.ok = TRUE) + } + }, + + #' @template field_acq_function + acq_function = function(rhs) { + if (missing(rhs)) { + private$.acq_function + } else { + private$.acq_function = assert_r6(rhs, classes = "AcqFunction", null.ok = TRUE) + } + }, + + #' @template field_acq_optimizer + acq_optimizer = function(rhs) { + if (missing(rhs)) { + private$.acq_optimizer + } else { + private$.acq_optimizer = assert_r6(rhs, classes = "AcqOptimizer", null.ok = TRUE) + } + }, + + #' @template field_param_classes + param_classes = function(rhs) { + assert_ro_binding(rhs) + param_classes_surrogate = c("logical" = "ParamLgl", "integer" = "ParamInt", "numeric" = "ParamDbl", "factor" = "ParamFct") + if (!is.null(self$surrogate)) { + param_classes_surrogate = param_classes_surrogate[c("logical", "integer", "numeric", "factor") %in% self$surrogate$feature_types] # surrogate has precedence over acq_function$surrogate + } + param_classes_acq_opt = if (!is.null(self$acq_optimizer)) { + self$acq_optimizer$optimizer$param_classes + } else { + c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct") + } + unname(intersect(param_classes_surrogate, param_classes_acq_opt)) + }, + + #' @template field_properties + properties = function(rhs) { + assert_ro_binding(rhs) + + properties_surrogate = "dependencies" + if (!is.null(self$surrogate)) { + if ("missings" %nin% self$surrogate$properties) { + properties_surrogate = character() + } + } + unname(c(properties_surrogate)) + }, + + #' @template field_packages + packages = function(rhs) { + assert_ro_binding(rhs) + union("mlr3mbo", c(self$acq_function$packages, self$surrogate$packages, self$acq_optimizer$optimizer$packages)) + } + ), + + private = list( + .surrogate = NULL, + .acq_function = NULL, + .acq_optimizer = NULL, + + .optimize = function(inst) { + pv = self$param_set$values + search_space = inst$search_space + archive = inst$archive + + if (is.null(self$acq_function)) { + self$acq_function = self$acq_optimizer$acq_function %??% default_acqfunction(inst) + } + + if (is.null(self$surrogate)) { + self$surrogate = self$acq_function$surrogate %??% default_surrogate(inst) + } + + if (is.null(self$acq_optimizer)) { + self$acq_optimizer = default_acqoptimizer(self$acq_function) + } + + self$surrogate$archive = inst$archive + self$acq_function$surrogate = self$surrogate + self$acq_optimizer$acq_function = self$acq_function + + lg$debug("Optimizer '%s' evaluates the initial design", self$id) + get_private(inst)$.eval_queue() + + lg$debug("Optimizer '%s' starts the tuning phase", self$id) + + # actual loop + while (!inst$is_terminated) { + # sample + self$acq_function$surrogate$update() + self$acq_function$update() + xdt = self$acq_optimizer$optimize() + xs = transpose_list(xdt)[[1]] + + # eval + get_private(inst)$.eval_point(xs) + } + } + ) +) + +#' @include aaa.R +optimizers[["async_mbo"]] = OptimizerAsyncMbo diff --git a/R/SurrogateLearnerAsync.R b/R/SurrogateLearnerAsync.R new file mode 100644 index 00000000..584f463b --- /dev/null +++ b/R/SurrogateLearnerAsync.R @@ -0,0 +1,87 @@ +#' @title Surrogate Model Containing a Single Learner +#' +#' @description +#' Surrogate model containing a single [mlr3::LearnerRegr]. +#' +#' @section Parameters: +#' \describe{ +#' \item{`assert_insample_perf`}{`logical(1)`\cr +#' Should the insample performance of the [mlr3::LearnerRegr] be asserted after updating the surrogate? +#' If the assertion fails (i.e., the insample performance based on the `perf_measure` does not meet the +#' `perf_threshold`), an error is thrown. +#' Default is `FALSE`. +#' } +#' \item{`perf_measure`}{[mlr3::MeasureRegr]\cr +#' Performance measure which should be use to assert the insample performance of the [mlr3::LearnerRegr]. +#' Only relevant if `assert_insample_perf = TRUE`. +#' Default is [mlr3::mlr_measures_regr.rsq]. +#' } +#' \item{`perf_threshold`}{`numeric(1)`\cr +#' Threshold the insample performance of the [mlr3::LearnerRegr] should be asserted against. +#' Only relevant if `assert_insample_perf = TRUE`. +#' Default is `0`. +#' } +#' \item{`catch_errors`}{`logical(1)`\cr +#' Should errors during updating the surrogate be caught and propagated to the `loop_function` which can then handle +#' the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation? +#' Default is `TRUE`. +#' } +#' \item{`impute_method`}{`character(1)`\cr +#' Method to impute missing values in the surrogate model. +#' } +#' } +#' +#' @export +SurrogateLearnerAsync = R6Class("SurrogateLearnerAsync", + inherit = SurrogateLearner, + + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + #' + #' @param learner ([mlr3::LearnerRegr]). + #' @template param_archive_surrogate + #' @template param_col_y_surrogate + #' @template param_cols_x_surrogate + initialize = function(learner, archive = NULL, cols_x = NULL, col_y = NULL) { + super$initialize(learner = learner, archive = archive, cols_x = cols_x, col_y = col_y) + + extra_ps = ps( + impute_method = p_fct(c("mean", "random"), default = "random") + ) + + private$.param_set = c(private$.param_set, extra_ps) + private$.param_set$set_values(impute_method = "random") + } + ), + + private = list( + .update = function() { + xydt = self$archive$rush$fetch_tasks_with_state(states = c("queued", "running", "finished"))[, c(self$cols_x, self$cols_y, "state"), with = FALSE] + + + if (self$param_set$values$impute_method == "mean") { + setnafill(xydt, type = "const", fill = mean(xydt[[self$cols_y]], na.rm = TRUE), cols = self$cols_y) + } else if (self$param_set$values$impute_method == "random") { + walk(self$cols_y, function(col) { + min = min(xydt[[col]], na.rm = TRUE) + max = max(xydt[[col]], na.rm = TRUE) + xydt[c("queued", "running"), (col) := runif(.N, min, max), on = "state"] + }) + } + set(xydt, j = "state", value = NULL) + + task = TaskRegr$new(id = "surrogate_task", backend = xydt, target = self$cols_y) + assert_learnable(task, learner = self$learner) + self$learner$train(task) + + if (self$param_set$values$assert_insample_perf) { + measure = assert_measure(self$param_set$values$perf_measure %??% mlr_measures$get("regr.rsq"), task = task, learner = self$learner) + private$.insample_perf = self$learner$predict(task)$score(measure, task = task, learner = self$learner) + self$assert_insample_perf + } + } + ) +) + diff --git a/R/TunerAsyncMbo.R b/R/TunerAsyncMbo.R new file mode 100644 index 00000000..70afae28 --- /dev/null +++ b/R/TunerAsyncMbo.R @@ -0,0 +1,57 @@ +#' @title Asynchronous Model Based Tuning +#' +#' @include OptimizerAsyncMbo.R +#' @name mlr_tuners_async_mbo +#' +#' @description +#' `TunerAsyncMbo` class that implements asynchronous Model Based Tuning (MBO). +#' +#' @section Parameters: +#' \describe{ +#' \item{`initial_design`}{`data.table::data.table()`\cr +#' Initial design of the optimization. +#' If `NULL`, a design of size `design_size` is generated with `design_function`.} +#' \item{`design_size`}{`integer(1)`\cr +#' Size of the initial design.} +#' \item{`design_function`}{`character(1)`\cr +#' Function to generate the initial design. +#' One of `c("random", "sobol", "lhs")`.} +#' \item{`n_workers`}{`integer(1)`\cr +#' Number of parallel workers. +#' If `NULL`, all rush workers set with [rush::rush_plan()] are used.} +#' } +#' +#' @template param_surrogate +#' @template param_acq_function +#' @template param_acq_optimizer +#' +#' @param param_set [paradox::ParamSet]\cr +#' Set of control parameters. +#' +#' @export +TunerAsyncMbo = R6Class("TunerAsyncMbo", + inherit = mlr3tuning::TunerAsyncFromOptimizerAsync, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function(surrogate = NULL, acq_function = NULL, acq_optimizer = NULL, param_set = NULL) { + optimizer = OptimizerAsyncMbo$new( + surrogate = surrogate, + acq_function = acq_function, + acq_optimizer = acq_optimizer, + param_set = param_set + ) + + super$initialize( + optimizer = optimizer, + man = "mlr3tuning::mlr_tuners_async_mbo" + ) + } + ) +) + +mlr_tuners$add("async_mbo", TunerAsyncMbo) + +#' @include aaa.R +tuners[["async_mbo"]] = TunerAsyncMbo diff --git a/R/aaa.R b/R/aaa.R index ed02c255..4a4686c6 100644 --- a/R/aaa.R +++ b/R/aaa.R @@ -1,2 +1,3 @@ optimizers = list() tuners = list() +callbacks = list() diff --git a/R/mbo_defaults.R b/R/mbo_defaults.R index 69c9664d..ccceb89b 100644 --- a/R/mbo_defaults.R +++ b/R/mbo_defaults.R @@ -161,14 +161,13 @@ default_surrogate = function(instance, learner = NULL, n_learner = NULL) { default_rf(noisy) } # stability: evaluate and add a fallback - learner$encapsulate[c("train", "predict")] = "evaluate" require_namespaces("ranger") fallback = mlr3learners::LearnerRegrRanger$new() fallback$param_set$values = insert_named( fallback$param_set$values, list(num.trees = 10L, keep.inbag = TRUE, se.method = "jack") ) - learner$fallback = fallback + learner$encapsulate("evaluate", fallback) if (has_deps) { require_namespaces("mlr3pipelines") @@ -184,14 +183,15 @@ default_surrogate = function(instance, learner = NULL, n_learner = NULL) { learner ) ) - learner$encapsulate[c("train", "predict")] = "evaluate" - learner$fallback = LearnerRegrFeatureless$new() + learner$encapsulate("evaluate", lrn("regr.featureless")) } } if (is.null(n_learner)) n_learner = length(instance$archive$cols_y) - if (n_learner == 1L) { + if (n_learner == 1L && inherits(instance, "OptimInstanceBatch")) { SurrogateLearner$new(learner) + } else if (n_learner == 1L && inherits(instance, "OptimInstanceAsync")) { + SurrogateLearnerAsync$new(learner) } else { learners = replicate(n_learner, learner$clone(deep = TRUE), simplify = FALSE) SurrogateLearnerCollection$new(learners) @@ -213,6 +213,8 @@ default_acqfunction = function(instance) { assert_r6(instance, classes = "OptimInstance") if (inherits(instance, "OptimInstanceBatchSingleCrit")) { AcqFunctionEI$new() + } else if (inherits(instance, "OptimInstanceAsyncSingleCrit")) { + AcqFunctionCB$new() } else if (inherits(instance, "OptimInstanceBatchMultiCrit")) { AcqFunctionSmsEgo$new() } diff --git a/R/mlr_callbacks.R b/R/mlr_callbacks.R new file mode 100644 index 00000000..51c8de61 --- /dev/null +++ b/R/mlr_callbacks.R @@ -0,0 +1,177 @@ +#' @title Sample Lambda from an Uniform Distribution +#' +#' @name mlr3mbo.sample_lambda +#' +#' @description +#' This [CallbackAsyncTuning] samples the lambda parameter of the confidence bounds acquisition function. +#' The lambda value is drawn from a uniform distribution with `min` and `max` as bounds. +#' +#' @param min_lambda (`numeric(1)`)\cr +#' Minimum value of the lambda parameter. +#' Defaults to `0.01`. +#' @param max_lambda (`numeric(1)`)\cr +#' Maximum value of the lambda parameter. +#' Defaults to `10`. +#' +#' @examples +#' clbk("mlr3mbo.sample_lambda", min_lambda = 0.01, max_lambda = 10) +NULL + +load_callback_sample_lambda_uniform = function() { + callback_async_tuning("mlr3mbo.sample_lambda_uniform", + label = "Sample Lambda Uniform", + man = "mlr3mbo::mlr3mbo.sample_lambda_uniform", + + on_optimization_begin = function(callback, context) { + assert_class(context$optimizer$acq_function, "AcqFunctionCB") + callback$state$min_lambda = assert_number(callback$state$min_lambda, lower = 0, null.ok = TRUE) %??% 0.01 + callback$state$max_lambda = assert_number(callback$state$max_lambda, lower = 0, null.ok = TRUE) %??% 10 + }, + + on_worker_begin = function(callback, context) { + callback$state$lambda = runif(1, min = callback$state$min_lambda, max = callback$state$max_lambda) + context$optimizer$acq_function$constants$set_values(lambda = callback$state$lambda) + }, + + on_optimizer_after_eval = function(callback, context) { + context$ys = c(context$ys, list(lambda = callback$state$lambda)) + } + ) +} + +callbacks[["mlr3mbo.sample_lambda_uniform"]] = load_callback_sample_lambda_uniform + +#' @title Sample Lambda from an Exponential Distribution +#' +#' @name mlr3mbo.sample_lambda_exponential +#' +#' @description +#' This [CallbackAsyncTuning] samples the lambda parameter of the confidence bounds acquisition function. +#' The lambda value is drawn from an exponential distribution with rate `1 / lambda`. +#' +#' @examples +#' clbk("mlr3mbo.sample_lambda_exponential") +NULL + +load_callback_sample_lambda_exponential = function() { + callback_async_tuning("mlr3mbo.sample_lambda_exponential", + label = "Sample Lambda Exponential", + man = "mlr3mbo::mlr3mbo.sample_lambda_exponential", + + on_optimization_begin = function(callback, context) { + assert_class(context$optimizer$acq_function, "AcqFunctionCB") + }, + + on_worker_begin = function(callback, context) { + lambda = context$optimizer$acq_function$constants$values$lambda + callback$state$lambda = rexp(1, 1 / lambda) + context$optimizer$acq_function$constants$set_values(lambda = callback$state$lambda) + }, + + on_optimizer_after_eval = function(callback, context) { + context$ys = c(context$ys, list(lambda = callback$state$lambda)) + } + ) +} + +callbacks[["mlr3mbo.sample_lambda_exponential"]] = load_callback_sample_lambda_exponential + +#' @title Exponential Decay of Lambda +#' +#' @name mlr3mbo.lambda_decay +#' +#' @description +#' This [CallbackAsyncTuning] decays the lambda parameter of the confidence bounds acquisition function. +#' The initial lambda value is drawn from an exponential distribution with rate `1 / lambda`. +#' The lambda value is updated after each evaluation by the formula `lambda * exp(-rate * (t %% period))`. +#' +#' @param rate (`numeric(1)`)\cr +#' Rate of the exponential decay. +#' Defaults to `0.1`. +#' @param period (`integer(1)`)\cr +#' Period of the exponential decay. +#' Defaults to `25`. +#' +#' @examples +#' clbk("mlr3mbo.exponential_lambda_decay", rate = 0.1, period = 25) +NULL + +load_callback_exponential_lambda_decay = function() { + callback_async_tuning("mlr3mbo.exponential_lambda_decay", + label = "Exponential Decay of Lambda", + man = "mlr3mbo::mlr3mbo.exponential_lambda_decay", + + on_optimization_begin = function(callback, context) { + assert_class(context$optimizer$acq_function, "AcqFunctionCB") + callback$state$rate = assert_number(callback$state$rate, lower = 0, null.ok = TRUE) %??% 0.1 + callback$state$period = assert_integer(callback$state$period, lower = 1, null.ok = TRUE) %??% 25 + callback$state$t = 0 + }, + + on_worker_begin = function(callback, context) { + lambda = context$optimizer$acq_function$constants$values$lambda + callback$state$lambda_0 = rexp(1, 1 / lambda) + context$optimizer$acq_function$constants$set_values(lambda = callback$state$lambda_0) + }, + + on_optimizer_after_eval = function(callback, context) { + if (!is.null(context$extra)) { # skip initial design + lambda_0 = callback$state$lambda_0 + t = callback$state$t + lambda = lambda_0 * exp(-callback$state$rate * (t %% callback$state$period)) + callback$state$t = t + 1 + context$optimizer$acq_function$constants$set_values(lambda = lambda) + context$ys = c(context$ys, list(lambda_0 = callback$state$lambda_0, lambda = lambda)) + } + } + ) +} + +callbacks[["mlr3mbo.exponential_lambda_decay"]] = load_callback_exponential_lambda_decay + +#' @title Epsilon Decay +#' +#' @name mlr3mbo.sample_epsilon +#' +#' @description +#' This [CallbackAsyncTuning] decays the epsilon parameter of the expected improvement acquisition function. +#' +#' @param rate (`numeric(1)`)\cr +#' Rate of the exponential decay. +#' Defaults to `0.1`. +#' @param period (`integer(1)`)\cr +#' Period of the exponential decay. +#' Defaults to `25`. +#' @examples +#' clbk("mlr3mbo.sample_epsilon", rate = 0.1, period = 25) +NULL + +load_callback_epsilon_decay = function() { + callback_async_tuning("mlr3mbo.epsilon_decay", + label = "Episilon Decay", + man = "mlr3mbo::mlr3mbo.epsilon_decay", + + on_optimization_begin = function(callback, context) { + assert_class(context$optimizer$acq_function, "AcqFunctionEI") + epsilon = context$optimizer$acq_function$constants$values$epsilon + callback$state$epsilon_0 = epsilon + callback$state$epsilon = epsilon + callback$state$rate = assert_number(callback$state$rate, lower = 0, null.ok = TRUE) %??% 0.1 + callback$state$period = assert_number(callback$state$period, lower = 1, null.ok = TRUE) %??% 25 + callback$state$t = 0 + }, + + on_optimizer_after_eval = function(callback, context) { + if (!is.null(context$extra)) { # skip initial design + t = callback$state$t + epsilon_0 = callback$state$epsilon_0 + epsilon = epsilon_0 * exp(-callback$state$rate * (t %% callback$state$period)) + callback$state$t = t + 1 + context$optimizer$acq_function$constants$set_values(epsilon = epsilon) + context$ys = c(context$ys, list(epsilon_0 = epsilon_0, epsilon = epsilon)) + } + } + ) +} + +callbacks[["mlr3mbo.epsilon_decay"]] = load_callback_epsilon_decay diff --git a/R/zzz.R b/R/zzz.R index 003b52c1..62d0e570 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -9,7 +9,7 @@ #' @import lgr #' @import mlr3 #' @import mlr3tuning -#' @importFrom stats setNames runif dnorm pnorm quantile +#' @importFrom stats setNames runif dnorm pnorm quantile rexp #' @useDynLib mlr3mbo c_sms_indicator c_eps_indicator "_PACKAGE" @@ -25,10 +25,17 @@ register_mlr3tuning = function() { iwalk(tuners, function(obj, nm) x$add(nm, obj)) } # nocov end +register_mlr3misc = function() { + # nocov start + x = utils::getFromNamespace("mlr_callbacks", ns = "mlr3misc") + iwalk(callbacks, function(obj, nm) x$add(nm, obj)) +} # nocov end + .onLoad = function(libname, pkgname) { # nolint # nocov start register_namespace_callback(pkgname, "bbotk", register_bbotk) register_namespace_callback(pkgname, "mlr3tuning", register_mlr3tuning) + register_namespace_callback(pkgname, "mlr3misc", register_mlr3misc) assign("lg", lgr::get_logger("bbotk"), envir = parent.env(environment())) @@ -41,6 +48,7 @@ register_mlr3tuning = function() { # nocov start walk(names(optimizers), function(id) bbotk::mlr_optimizers$remove(id)) walk(names(tuners), function(id) mlr3tuning::mlr_tuners$remove(id)) + walk(names(callbacks), function(id) mlr3misc::mlr_callbacks$remove(id)) } # nocov end # static code checks should not complain about commonly used data.table columns diff --git a/attic/OptimizerADBO.R b/attic/OptimizerADBO.R new file mode 100644 index 00000000..3cdd4754 --- /dev/null +++ b/attic/OptimizerADBO.R @@ -0,0 +1,152 @@ +#' @title Asynchronous Decentralized Bayesian Optimization +#' @name mlr_optimizers_adbo +#' +#' @description +#' Asynchronous Decentralized Bayesian Optimization (ADBO). +#' +#' @note +#' The \eqn{\lambda} parameter of the upper confidence bound acquisition function controls the trade-off between exploration and exploitation. +#' A large \eqn{\lambda} value leads to more exploration, while a small \eqn{\lambda} value leads to more exploitation. +#' ADBO can use periodic exponential decay to reduce \eqn{\lambda} periodically to the exploitation phase. +#' +#' @section Parameters: +#' \describe{ +#' \item{`lambda`}{`numeric(1)`\cr +#' \eqn{\lambda} value used for the confidence bound. +#' Defaults to `1.96`.} +#' \item{`exponential_decay`}{`lgl(1)`\cr +#' Whether to use periodic exponential decay for \eqn{\lambda}.} +#' \item{`rate`}{`numeric(1)`\cr +#' Rate of the exponential decay.} +#' \item{`t`}{`integer(1)`\cr +#' Period of the exponential decay.} +#' \item{`initial_design_size`}{`integer(1)`\cr +#' Size of the initial design.} +#' \item{`initial_design`}{`data.table`\cr +#' Initial design.} +#' \item{`impute_method`}{`character(1)`\cr +#' Imputation method for missing values in the surrogate model.} +#' \item{`n_workers`}{`integer(1)`\cr +#' Number of workers to use. +#' Defaults to the number of workers set by `rush::rush_plan()`} +#' } +#' +#' @export +OptimizerADBO = R6Class("OptimizerADBO", + inherit = OptimizerAsyncMbo, + + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function() { + param_set = ps( + lambda = p_dbl(lower = 0, default = 1.96), + exponential_decay = p_lgl(default = TRUE), + rate = p_dbl(lower = 0, default = 0.1), + period = p_int(lower = 1L, default = 25L), + design_size = p_int(lower = 1L), + initial_design = p_uty(), + impute_method = p_fct(c("mean", "random"), default = "random"), + n_workers = p_int(lower = 1L, default = NULL, special_vals = list(NULL)) + ) + + param_set$set_values(lambda = 1.96, exponential_decay = TRUE, rate = 0.1, period = 25L, design_size = 1L, impute_method = "random") + + super$initialize("adbo", + param_set = param_set, + param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"), + properties = c("dependencies", "single-crit"), + packages = "mlr3mbo", + label = "Asynchronous Decentralized Bayesian Optimization", + man = "mlr3mbo::OptimizerADBO") + }, + + + #' @description + #' Performs the optimization on a [OptimInstanceAsyncSingleCrit] or [OptimInstanceAsyncMultiCrit] until termination. + #' The single evaluations will be written into the [ArchiveAsync]. + #' The result will be written into the instance object. + #' + #' @param inst ([OptimInstanceAsyncSingleCrit] | [OptimInstanceAsyncMultiCrit]). + #' + #' @return [data.table::data.table()] + optimize = function(inst) { + pv = self$param_set$values + + # initial design + design = if (is.null(pv$initial_design)) { + + lg$debug("Generating sobol design with size %s", pv$design_size) + generate_design_sobol(inst$search_space, n = pv$design_size)$data + } else { + + lg$debug("Using provided initial design with size %s", nrow(pv$initial_design)) + pv$initial_design + } + + optimize_async_default(inst, self, design, n_workers = pv$n_workers) + } + ), + + private = list( + + .optimize = function(inst) { + pv = self$param_set$values + search_space = inst$search_space + archive = inst$archive + + # sample lambda from exponential distribution + lambda_0 = rexp(1, 1 / pv$lambda) + t = 0 + + surrogate = default_surrogate(inst) + surrogate$param_set$set_values(impute_method = pv$impute_method) + acq_function = acqf("cb", lambda = runif(1, 1 , 3)) + acq_optimizer = acqo(opt("random_search", batch_size = 1000L), terminator = trm("evals", n_evals = 10000L)) + surrogate$archive = inst$archive + acq_function$surrogate = surrogate + acq_optimizer$acq_function = acq_function + + lg$debug("Optimizer '%s' evaluates the initial design", self$id) + evaluate_queue_default(inst) + + lg$debug("Optimizer '%s' starts the tuning phase", self$id) + + # actual loop + while (!inst$is_terminated) { + + # decrease lambda + if (pv$exponential_decay) { + lambda = lambda_0 * exp(-pv$rate * (t %% pv$period)) + t = t + 1 + } else { + lambda = pv$lambda + } + + # sample + acq_function$constants$set_values(lambda = lambda) + acq_function$surrogate$update() + acq_function$update() + xdt = acq_optimizer$optimize() + + # transpose point + xss = transpose_list(xdt) + xs = xss[[1]][inst$archive$cols_x] + lg$trace("Optimizer '%s' draws %s", self$id, as_short_string(xs)) + xs_trafoed = trafo_xs(xs, search_space) + + # eval + key = archive$push_running_point(xs) + ys = inst$objective$eval(xs_trafoed) + + # push result + extra = c(xss[[1]][c("acq_cb", ".already_evaluated")], list(lambda_0 = lambda_0, lambda = lambda)) + archive$push_result(key, ys, x_domain = xs_trafoed, extra = extra) + } + } + ) +) + +#' @include aaa.R +optimizers[["adbo"]] = OptimizerADBO diff --git a/attic/TunerADBO.R b/attic/TunerADBO.R new file mode 100644 index 00000000..6c89074f --- /dev/null +++ b/attic/TunerADBO.R @@ -0,0 +1,48 @@ +#' @title Asynchronous Decentralized Bayesian Optimization +#' @name mlr_tuners_adbo +#' +#' @description +#' Asynchronous Decentralized Bayesian Optimization (ADBO). +#' +#' @note +#' The \eqn{\lambda} parameter of the upper confidence bound acquisition function controls the trade-off between exploration and exploitation. +#' A large \eqn{\lambda} value leads to more exploration, while a small \eqn{\lambda} value leads to more exploitation. +#' ADBO can use periodic exponential decay to reduce \eqn{\lambda} periodically to the exploitation phase. +#' +#' @section Parameters: +#' \describe{ +#' \item{`lambda`}{`numeric(1)`\cr +#' \eqn{\lambda} value used for the confidence bound. +#' Defaults to `1.96`.} +#' \item{`exponential_decay`}{`lgl(1)`\cr +#' Whether to use periodic exponential decay for \eqn{\lambda}.} +#' \item{`rate`}{`numeric(1)`\cr +#' Rate of the exponential decay.} +#' \item{`t`}{`integer(1)`\cr +#' Period of the exponential decay.} +#' \item{`initial_design_size`}{`integer(1)`\cr +#' Size of the initial design.} +#' \item{`initial_design`}{`data.table`\cr +#' Initial design.} +#' } +#' +#' @export +TunerADBO = R6Class("TunerADBO", + inherit = mlr3tuning::TunerAsyncFromOptimizerAsync, + public = list( + + #' @description + #' Creates a new instance of this [R6][R6::R6Class] class. + initialize = function() { + super$initialize( + optimizer = OptimizerADBO$new(), + man = "mlr3tuning::mlr_tuners_adbo" + ) + } + ) +) + +mlr_tuners$add("adbo", TunerADBO) + +#' @include aaa.R +tuners[["adbo"]] = TunerADBO diff --git a/attic/test_OptimizerADBO.R b/attic/test_OptimizerADBO.R new file mode 100644 index 00000000..1c91606e --- /dev/null +++ b/attic/test_OptimizerADBO.R @@ -0,0 +1,15 @@ +# test_that("adbo optimizer works", { +# skip_on_cran() +# skip_if_not_installed("rush") +# flush_redis() + +# rush::rush_plan(n_workers = 2) +# instance = oi_async( +# objective = OBJ_2D, +# search_space = PS_2D, +# terminator = trm("evals", n_evals = 100), +# ) +# optimizer = opt("adbo", design_size = 4) +# optimizer$optimize(instance) +# }) + diff --git a/attic/test_TunerADBO.R b/attic/test_TunerADBO.R new file mode 100644 index 00000000..5d677bff --- /dev/null +++ b/attic/test_TunerADBO.R @@ -0,0 +1,110 @@ +test_that("adbo tuner works", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + learner = lrn("classif.rpart", + minsplit = to_tune(2, 128), + cp = to_tune(1e-04, 1e-1)) + + rush::rush_plan(n_workers = 4) + instance = ti_async( + task = tsk("pima"), + learner = learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE + ) + + tuner = tnr("adbo", design_size = 4) + tuner$optimize(instance) + + expect_data_table(instance$archive$data, min.rows = 20L) + expect_rush_reset(instance$rush) +}) + +test_that("adbo works with transformation functions", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + learner = lrn("classif.rpart", + minsplit = to_tune(2, 128, logscale = TRUE), + cp = to_tune(1e-04, 1e-1, logscale = TRUE)) + + rush::rush_plan(n_workers = 2) + instance = ti_async( + task = tsk("pima"), + learner = learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE + ) + + optimizer = tnr("adbo", design_size = 4) + optimizer$optimize(instance) + + expect_data_table(instance$archive$data, min.rows = 20) + expect_rush_reset(instance$rush) +}) + +test_that("search works with dependencies", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + learner = lrn("classif.rpart", + minsplit = to_tune(p_int(2, 128, depends = keep_model == TRUE)), + cp = to_tune(1e-04, 1e-1), + keep_model = to_tune()) + + rush::rush_plan(n_workers = 2) + instance = ti_async( + task = tsk("pima"), + learner = learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE + ) + + optimizer = tnr("adbo", design_size = 4) + optimizer$optimize(instance) + + expect_data_table(instance$archive$data, min.rows = 20) + expect_rush_reset(instance$rush) +}) + +test_that("adbo works with branching", { + skip_on_cran() + skip_if_not_installed("rush") + skip_if_not_installed("mlr3pipelines") + flush_redis() + library(mlr3pipelines) + + graph_learner = as_learner(ppl("branch", graphs = list(rpart = lrn("classif.rpart", id = "rpart"),debug = lrn("classif.debug", id = "debug")))) + graph_learner$param_set$set_values( + "rpart.cp" = to_tune(p_dbl(1e-04, 1e-1, depends = branch.selection == "rpart")), + "rpart.minsplit" = to_tune(p_int(2, 128, depends = branch.selection == "rpart")), + "debug.x" = to_tune(p_dbl(0, 1, depends = branch.selection == "debug")), + "branch.selection" = to_tune(c("rpart", "debug")) + ) + + rush::rush_plan(n_workers = 2) + instance = ti_async( + task = tsk("pima"), + learner = graph_learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE + ) + + optimizer = tnr("adbo", design_size = 4) + optimizer$optimize(instance) + + expect_data_table(instance$archive$data, min.rows = 20) + expect_rush_reset(instance$rush) +}) diff --git a/man/SurrogateLearnerAsync.Rd b/man/SurrogateLearnerAsync.Rd new file mode 100644 index 00000000..911f9cef --- /dev/null +++ b/man/SurrogateLearnerAsync.Rd @@ -0,0 +1,104 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/SurrogateLearnerAsync.R +\name{SurrogateLearnerAsync} +\alias{SurrogateLearnerAsync} +\title{Surrogate Model Containing a Single Learner} +\description{ +Surrogate model containing a single \link[mlr3:LearnerRegr]{mlr3::LearnerRegr}. +} +\section{Parameters}{ + +\describe{ +\item{\code{assert_insample_perf}}{\code{logical(1)}\cr +Should the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr} be asserted after updating the surrogate? +If the assertion fails (i.e., the insample performance based on the \code{perf_measure} does not meet the +\code{perf_threshold}), an error is thrown. +Default is \code{FALSE}. +} +\item{\code{perf_measure}}{\link[mlr3:MeasureRegr]{mlr3::MeasureRegr}\cr +Performance measure which should be use to assert the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr}. +Only relevant if \code{assert_insample_perf = TRUE}. +Default is \link[mlr3:mlr_measures_regr.rsq]{mlr3::mlr_measures_regr.rsq}. +} +\item{\code{perf_threshold}}{\code{numeric(1)}\cr +Threshold the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr} should be asserted against. +Only relevant if \code{assert_insample_perf = TRUE}. +Default is \code{0}. +} +\item{\code{catch_errors}}{\code{logical(1)}\cr +Should errors during updating the surrogate be caught and propagated to the \code{loop_function} which can then handle +the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation? +Default is \code{TRUE}. +} +\item{\code{impute_method}}{\code{character(1)}\cr +Method to impute missing values in the surrogate model. +} +} +} + +\section{Super classes}{ +\code{\link[mlr3mbo:Surrogate]{mlr3mbo::Surrogate}} -> \code{\link[mlr3mbo:SurrogateLearner]{mlr3mbo::SurrogateLearner}} -> \code{SurrogateLearnerAsync} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-SurrogateLearnerAsync-new}{\code{SurrogateLearnerAsync$new()}} +\item \href{#method-SurrogateLearnerAsync-clone}{\code{SurrogateLearnerAsync$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-SurrogateLearnerAsync-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{SurrogateLearnerAsync$new(learner, archive = NULL, cols_x = NULL, col_y = NULL)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{learner}}{(\link[mlr3:LearnerRegr]{mlr3::LearnerRegr}).} + +\item{\code{archive}}{(\link[bbotk:Archive]{bbotk::Archive} | \code{NULL})\cr +\link[bbotk:Archive]{bbotk::Archive} of the \link[bbotk:OptimInstance]{bbotk::OptimInstance}.} + +\item{\code{cols_x}}{(\code{character()} | \code{NULL})\cr +Column id's of variables that should be used as features. +By default, automatically inferred based on the archive.} + +\item{\code{col_y}}{(\code{character(1)} | \code{NULL})\cr +Column id of variable that should be used as a target. +By default, automatically inferred based on the archive.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-SurrogateLearnerAsync-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{SurrogateLearnerAsync$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/mlr3mbo.lambda_decay.Rd b/man/mlr3mbo.lambda_decay.Rd new file mode 100644 index 00000000..163dd8fc --- /dev/null +++ b/man/mlr3mbo.lambda_decay.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/mlr_callbacks.R +\name{mlr3mbo.lambda_decay} +\alias{mlr3mbo.lambda_decay} +\title{Exponential Decay of Lambda} +\arguments{ +\item{rate}{(\code{numeric(1)})\cr +Rate of the exponential decay. +Defaults to \code{0.1}.} + +\item{period}{(\code{integer(1)})\cr +Period of the exponential decay. +Defaults to \code{25}.} +} +\description{ +This \link{CallbackAsyncTuning} decays the lambda parameter of the confidence bounds acquisition function. +The initial lambda value is drawn from an exponential distribution with rate \code{1 / lambda}. +The lambda value is updated after each evaluation by the formula \code{lambda * exp(-rate * (t \%\% period))}. +} +\examples{ +clbk("mlr3mbo.exponential_lambda_decay", rate = 0.1, period = 25) +} diff --git a/man/mlr3mbo.sample_epsilon.Rd b/man/mlr3mbo.sample_epsilon.Rd new file mode 100644 index 00000000..3c2408e9 --- /dev/null +++ b/man/mlr3mbo.sample_epsilon.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/mlr_callbacks.R +\name{mlr3mbo.sample_epsilon} +\alias{mlr3mbo.sample_epsilon} +\title{Epsilon Decay} +\arguments{ +\item{rate}{(\code{numeric(1)})\cr +Rate of the exponential decay. +Defaults to \code{0.1}.} + +\item{period}{(\code{integer(1)})\cr +Period of the exponential decay. +Defaults to \code{25}.} +} +\description{ +This \link{CallbackAsyncTuning} decays the epsilon parameter of the expected improvement acquisition function. +} +\examples{ +clbk("mlr3mbo.sample_epsilon", rate = 0.1, period = 25) +} diff --git a/man/mlr3mbo.sample_lambda.Rd b/man/mlr3mbo.sample_lambda.Rd new file mode 100644 index 00000000..585f0d12 --- /dev/null +++ b/man/mlr3mbo.sample_lambda.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/mlr_callbacks.R +\name{mlr3mbo.sample_lambda} +\alias{mlr3mbo.sample_lambda} +\title{Sample Lambda from an Uniform Distribution} +\arguments{ +\item{min_lambda}{(\code{numeric(1)})\cr +Minimum value of the lambda parameter. +Defaults to \code{0.01}.} + +\item{max_lambda}{(\code{numeric(1)})\cr +Maximum value of the lambda parameter. +Defaults to \code{10}.} +} +\description{ +This \link{CallbackAsyncTuning} samples the lambda parameter of the confidence bounds acquisition function. +The lambda value is drawn from a uniform distribution with \code{min} and \code{max} as bounds. +} +\examples{ +clbk("mlr3mbo.sample_lambda", min_lambda = 0.01, max_lambda = 10) +} diff --git a/man/mlr3mbo.sample_lambda_exponential.Rd b/man/mlr3mbo.sample_lambda_exponential.Rd new file mode 100644 index 00000000..6a4182f3 --- /dev/null +++ b/man/mlr3mbo.sample_lambda_exponential.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/mlr_callbacks.R +\name{mlr3mbo.sample_lambda_exponential} +\alias{mlr3mbo.sample_lambda_exponential} +\title{Sample Lambda from an Exponential Distribution} +\description{ +This \link{CallbackAsyncTuning} samples the lambda parameter of the confidence bounds acquisition function. +The lambda value is drawn from an exponential distribution with rate \code{1 / lambda}. +} +\examples{ +clbk("mlr3mbo.sample_lambda_exponential") +} diff --git a/man/mlr_optimizers_async_mbo.Rd b/man/mlr_optimizers_async_mbo.Rd new file mode 100644 index 00000000..7a90fb47 --- /dev/null +++ b/man/mlr_optimizers_async_mbo.Rd @@ -0,0 +1,149 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/OptimizerAsyncMbo.R +\name{mlr_optimizers_async_mbo} +\alias{mlr_optimizers_async_mbo} +\alias{OptimizerAsyncMbo} +\title{Asynchronous Model Based Optimization} +\description{ +\code{OptimizerAsyncMbo} class that implements asynchronous Model Based Optimization (MBO). +} +\section{Parameters}{ + +\describe{ +\item{\code{initial_design}}{\code{data.table::data.table()}\cr +Initial design of the optimization. +If \code{NULL}, a design of size \code{design_size} is generated with \code{design_function}.} +\item{\code{design_size}}{\code{integer(1)}\cr +Size of the initial design.} +\item{\code{design_function}}{\code{character(1)}\cr +Function to generate the initial design. +One of \code{c("random", "sobol", "lhs")}.} +\item{\code{n_workers}}{\code{integer(1)}\cr +Number of parallel workers. +If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush_plan()}} are used.} +} +} + +\section{Super classes}{ +\code{\link[bbotk:Optimizer]{bbotk::Optimizer}} -> \code{\link[bbotk:OptimizerAsync]{bbotk::OptimizerAsync}} -> \code{OptimizerAsyncMbo} +} +\section{Active bindings}{ +\if{html}{\out{
}} +\describe{ +\item{\code{surrogate}}{(\link{Surrogate} | \code{NULL})\cr +The surrogate.} + +\item{\code{acq_function}}{(\link{AcqFunction} | \code{NULL})\cr +The acquisition function.} + +\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr +The acquisition function optimizer.} + +\item{\code{param_classes}}{(\code{character()})\cr +Supported parameter classes that the optimizer can optimize. +Determined based on the \code{surrogate} and the \code{acq_optimizer}. +This corresponds to the values given by a \link[paradox:ParamSet]{paradox::ParamSet}'s +\verb{$class} field.} + +\item{\code{properties}}{(\code{character()})\cr +Set of properties of the optimizer. +Must be a subset of \code{\link[bbotk:bbotk_reflections]{bbotk_reflections$optimizer_properties}}. +MBO in principle is very flexible and by default we assume that the optimizer has all properties. +When fully initialized, properties are determined based on the \code{loop_function} and \code{surrogate}.} + +\item{\code{packages}}{(\code{character()})\cr +Set of required packages. +A warning is signaled prior to optimization if at least one of the packages is not installed, but loaded (not attached) later on-demand via \code{\link[=requireNamespace]{requireNamespace()}}. +Required packages are determined based on the \code{acq_function}, \code{surrogate} and the \code{acq_optimizer}.} +} +\if{html}{\out{
}} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-OptimizerAsyncMbo-new}{\code{OptimizerAsyncMbo$new()}} +\item \href{#method-OptimizerAsyncMbo-optimize}{\code{OptimizerAsyncMbo$optimize()}} +\item \href{#method-OptimizerAsyncMbo-clone}{\code{OptimizerAsyncMbo$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncMbo-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncMbo$new( + surrogate = NULL, + acq_function = NULL, + acq_optimizer = NULL, + param_set = NULL +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{surrogate}}{(\link{Surrogate} | \code{NULL})\cr +The surrogate.} + +\item{\code{acq_function}}{(\link{AcqFunction} | \code{NULL})\cr +The acquisition function.} + +\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr +The acquisition function optimizer.} + +\item{\code{param_set}}{\link[paradox:ParamSet]{paradox::ParamSet}\cr +Set of control parameters.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncMbo-optimize}{}}} +\subsection{Method \code{optimize()}}{ +Performs the optimization on a \link{OptimInstanceAsyncSingleCrit} or \link{OptimInstanceAsyncMultiCrit} until termination. +The single evaluations will be written into the \link{ArchiveAsync}. +The result will be written into the instance object. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncMbo$optimize(inst)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{inst}}{(\link{OptimInstanceAsyncSingleCrit} | \link{OptimInstanceAsyncMultiCrit}).} +} +\if{html}{\out{
}} +} +\subsection{Returns}{ +\code{\link[data.table:data.table]{data.table::data.table()}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-OptimizerAsyncMbo-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{OptimizerAsyncMbo$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/man/mlr_tuners_async_mbo.Rd b/man/mlr_tuners_async_mbo.Rd new file mode 100644 index 00000000..3dbe203c --- /dev/null +++ b/man/mlr_tuners_async_mbo.Rd @@ -0,0 +1,96 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/TunerAsyncMbo.R +\name{mlr_tuners_async_mbo} +\alias{mlr_tuners_async_mbo} +\alias{TunerAsyncMbo} +\title{Asynchronous Model Based Tuning} +\description{ +\code{TunerAsyncMbo} class that implements asynchronous Model Based Tuning (MBO). +} +\section{Parameters}{ + +\describe{ +\item{\code{initial_design}}{\code{data.table::data.table()}\cr +Initial design of the optimization. +If \code{NULL}, a design of size \code{design_size} is generated with \code{design_function}.} +\item{\code{design_size}}{\code{integer(1)}\cr +Size of the initial design.} +\item{\code{design_function}}{\code{character(1)}\cr +Function to generate the initial design. +One of \code{c("random", "sobol", "lhs")}.} +\item{\code{n_workers}}{\code{integer(1)}\cr +Number of parallel workers. +If \code{NULL}, all rush workers set with \code{\link[rush:rush_plan]{rush::rush_plan()}} are used.} +} +} + +\section{Super classes}{ +\code{\link[mlr3tuning:Tuner]{mlr3tuning::Tuner}} -> \code{\link[mlr3tuning:TunerAsync]{mlr3tuning::TunerAsync}} -> \code{\link[mlr3tuning:TunerAsyncFromOptimizerAsync]{mlr3tuning::TunerAsyncFromOptimizerAsync}} -> \code{TunerAsyncMbo} +} +\section{Methods}{ +\subsection{Public methods}{ +\itemize{ +\item \href{#method-TunerAsyncMbo-new}{\code{TunerAsyncMbo$new()}} +\item \href{#method-TunerAsyncMbo-clone}{\code{TunerAsyncMbo$clone()}} +} +} +\if{html}{\out{ +
Inherited methods + +
+}} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-TunerAsyncMbo-new}{}}} +\subsection{Method \code{new()}}{ +Creates a new instance of this \link[R6:R6Class]{R6} class. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{TunerAsyncMbo$new( + surrogate = NULL, + acq_function = NULL, + acq_optimizer = NULL, + param_set = NULL +)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{surrogate}}{(\link{Surrogate} | \code{NULL})\cr +The surrogate.} + +\item{\code{acq_function}}{(\link{AcqFunction} | \code{NULL})\cr +The acquisition function.} + +\item{\code{acq_optimizer}}{(\link{AcqOptimizer} | \code{NULL})\cr +The acquisition function optimizer.} + +\item{\code{param_set}}{\link[paradox:ParamSet]{paradox::ParamSet}\cr +Set of control parameters.} +} +\if{html}{\out{
}} +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-TunerAsyncMbo-clone}{}}} +\subsection{Method \code{clone()}}{ +The objects of this class are cloneable with this method. +\subsection{Usage}{ +\if{html}{\out{
}}\preformatted{TunerAsyncMbo$clone(deep = FALSE)}\if{html}{\out{
}} +} + +\subsection{Arguments}{ +\if{html}{\out{
}} +\describe{ +\item{\code{deep}}{Whether to make a deep clone.} +} +\if{html}{\out{
}} +} +} +} diff --git a/tests/testthat/helper.R b/tests/testthat/helper.R index 6acb90d0..14b28839 100644 --- a/tests/testthat/helper.R +++ b/tests/testthat/helper.R @@ -102,12 +102,12 @@ MAKE_DESIGN = function(instance, n = 4L) { if (requireNamespace("mlr3learners") && requireNamespace("DiceKriging") && requireNamespace("rgenoud")) { library(mlr3learners) REGR_KM_NOISY = lrn("regr.km", covtype = "matern3_2", optim.method = "gen", control = list(trace = FALSE, max.generations = 2), nugget.estim = TRUE, jitter = 1e-12) - REGR_KM_NOISY$encapsulate = c(train = "callr", predict = "callr") + REGR_KM_NOISY$encapsulate("callr", lrn("regr.featureless")) REGR_KM_DETERM = lrn("regr.km", covtype = "matern3_2", optim.method = "gen", control = list(trace = FALSE, max.generations = 2), nugget.stability = 10^-8) - REGR_KM_DETERM$encapsulate = c(train = "callr", predict = "callr") + REGR_KM_DETERM$encapsulate("callr", lrn("regr.featureless")) } REGR_FEATURELESS = lrn("regr.featureless") -REGR_FEATURELESS$encapsulate = c(train = "callr", predict = "callr") +REGR_FEATURELESS$encapsulate("callr", lrn("regr.featureless")) OptimizerError = R6Class("OptimizerError", inherit = OptimizerBatch, @@ -201,6 +201,19 @@ expect_acqfunction = function(acqf) { expect_man_exists(acqf$man) } +expect_rush_reset = function(rush, type = "kill") { + processes = rush$processes + rush$reset(type = type) + expect_list(rush$connector$command(c("KEYS", "*")), len = 0) + walk(processes, function(p) p$kill()) +} + +flush_redis = function() { + config = redux::redis_config() + r = redux::hiredis(config) + r$FLUSHDB() +} + sortnames = function(x) { if (!is.null(names(x))) { x = x[order(names(x), decreasing = TRUE)] diff --git a/tests/testthat/test_OptimizerAsyncMbo.R b/tests/testthat/test_OptimizerAsyncMbo.R new file mode 100644 index 00000000..2fbe35ff --- /dev/null +++ b/tests/testthat/test_OptimizerAsyncMbo.R @@ -0,0 +1,163 @@ +test_that("async optimizer works in defaults", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 10), + ) + optimizer = opt("async_mbo", design_function = "sobol", design_size = 5) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated")) + + expect_rush_reset(instance$rush) +}) + +test_that("async optimizer works with ei", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 10), + ) + optimizer = opt("async_mbo", + design_function = "sobol", + design_size = 6, + acq_function = acqf("ei")) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_ei", ".already_evaluated")) + + expect_rush_reset(instance$rush) +}) + +test_that("async optimizer works with ei with epsilon decay", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 26), + callback = clbk("mlr3mbo.epsilon_decay", rate = 0.1, period = 5L) + ) + optimizer = opt("async_mbo", + design_function = "sobol", + design_size = 6, + acq_function = acqf("ei", epsilon = 3)) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_ei", "epsilon_0", "epsilon", ".already_evaluated")) + + expect_rush_reset(instance$rush) +}) + +test_that("async optimizer works with cb", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 10) + ) + optimizer = opt("async_mbo", + design_function = "sobol", + design_size = 5, + acq_function = acqf("cb", lambda = 3)) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated")) + + expect_rush_reset(instance$rush) +}) + +test_that("async optimizer works with cb with uniformly sampled lambda", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 20), + callback = clbk("mlr3mbo.sample_lambda_uniform") + ) + optimizer = opt("async_mbo", + design_function = "sobol", + design_size = 5, + acq_function = acqf("cb", lambda = 3)) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_cb", "lambda", ".already_evaluated")) + expect_length(unique(instance$archive$data$lambda), 2) + + expect_rush_reset(instance$rush) +}) + +test_that("async optimizer works with cb with exponentially sampled lambda", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 20), + callback = clbk("mlr3mbo.sample_lambda_exponential") + ) + optimizer = opt("async_mbo", + design_function = "sobol", + design_size = 5, + acq_function = acqf("cb", lambda = 3)) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_cb", "lambda", ".already_evaluated")) + expect_length(unique(instance$archive$data$lambda), 2) + + expect_rush_reset(instance$rush) +}) + +test_that("async mbo works with exponential lambda decay", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + rush::rush_plan(n_workers = 2) + instance = oi_async( + objective = OBJ_2D, + search_space = PS_2D, + terminator = trm("evals", n_evals = 10), + callback = clbk("mlr3mbo.exponential_lambda_decay") + ) + optimizer = opt("async_mbo", + design_function = "sobol", + design_size = 5, + acq_function = acqf("cb", lambda = 3)) + + expect_data_table(optimizer$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "lambda_0", "lambda")) + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_TunerAsyncMbo.R b/tests/testthat/test_TunerAsyncMbo.R new file mode 100644 index 00000000..51906976 --- /dev/null +++ b/tests/testthat/test_TunerAsyncMbo.R @@ -0,0 +1,87 @@ + +test_that("async mbo tuner works", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + learner = lrn("classif.rpart", + minsplit = to_tune(2, 128), + cp = to_tune(1e-04, 1e-1)) + + rush::rush_plan(n_workers = 4) + instance = ti_async( + task = tsk("pima"), + learner = learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE + ) + + tuner = tnr("async_mbo", design_size = 4) + + tuner$optimize(instance) +}) + +test_that("async tuner works with ei", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + learner = lrn("classif.rpart", + minsplit = to_tune(2, 128), + cp = to_tune(1e-04, 1e-1)) + + rush::rush_plan(n_workers = 2) + instance = ti_async( + task = tsk("pima"), + learner = learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE + ) + + tuner = tnr("async_mbo", + design_function = "sobol", + design_size = 6, + acq_function = acqf("ei")) + + expect_data_table(tuner$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_ei", ".already_evaluated")) + + expect_rush_reset(instance$rush) +}) + +test_that("async tuner works with exponential lambda decay", { + skip_on_cran() + skip_if_not_installed("rush") + flush_redis() + + learner = lrn("classif.rpart", + minsplit = to_tune(2, 128), + cp = to_tune(1e-04, 1e-1)) + + rush::rush_plan(n_workers = 2) + instance = ti_async( + task = tsk("pima"), + learner = learner, + resampling = rsmp("cv", folds = 3), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 20), + store_benchmark_result = FALSE, + callback = clbk("mlr3mbo.exponential_lambda_decay") + ) + + tuner = tnr("async_mbo", + design_function = "sobol", + design_size = 5, + acq_function = acqf("cb", lambda = 3)) + + expect_data_table(tuner$optimize(instance), nrows = 1) + expect_data_table(instance$archive$data, min.rows = 10) + expect_names(names(instance$archive$data), must.include = c("acq_cb", ".already_evaluated", "lambda_0", "lambda")) + + expect_rush_reset(instance$rush) +}) diff --git a/tests/testthat/test_mbo_defaults.R b/tests/testthat/test_mbo_defaults.R index 7cef2365..062c35e1 100644 --- a/tests/testthat/test_mbo_defaults.R +++ b/tests/testthat/test_mbo_defaults.R @@ -23,7 +23,7 @@ test_that("default_surrogate", { expect_r6(surrogate$learner, "LearnerRegrKM") expect_equal_sorted(surrogate$learner$param_set$values, list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.stability = 1e-08)) - expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner$fallback, "LearnerRegrRanger") # singlecrit all numeric, noisy @@ -32,7 +32,7 @@ test_that("default_surrogate", { expect_r6(surrogate$learner, "LearnerRegrKM") expect_equal_sorted(surrogate$learner$param_set$values, list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.estim = TRUE, jitter = 1e-12)) - expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner$fallback, "LearnerRegrRanger") # twocrit all numeric, deterministic @@ -41,11 +41,11 @@ test_that("default_surrogate", { expect_list(surrogate$learner, types = "LearnerRegrKM") expect_equal_sorted(surrogate$learner[[1L]]$param_set$values, list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.stability = 1e-08)) - expect_equal(surrogate$learner[[1L]]$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner[[1L]]$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner[[1L]]$fallback, "LearnerRegrRanger") expect_equal(surrogate$learner[[1L]]$param_set$values, surrogate$learner[[2L]]$param_set$values) - expect_equal(surrogate$learner[[1L]]$encapsulate, surrogate$learner[[2L]]$encapsulate) - expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback) + expect_equal(surrogate$learner[[1L]]$encapsulation, surrogate$learner[[2L]]$encapsulation) + # expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback) # twocrit all numeric, noisy surrogate = default_surrogate(MAKE_INST(OBJ_1D_2_NOISY, search_space = PS_1D)) @@ -53,10 +53,10 @@ test_that("default_surrogate", { expect_list(surrogate$learner, types = "LearnerRegrKM") expect_equal_sorted(surrogate$learner[[1L]]$param_set$values, list(covtype = "matern5_2", optim.method = "gen", control = list(trace = FALSE), nugget.estim = TRUE, jitter = 1e-12)) - expect_equal(surrogate$learner[[1L]]$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner[[1L]]$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner[[1L]]$fallback, "LearnerRegrRanger") expect_equal(surrogate$learner[[1L]]$param_set$values, surrogate$learner[[2L]]$param_set$values) - expect_equal(surrogate$learner[[1L]]$encapsulate, surrogate$learner[[2L]]$encapsulate) + expect_equal(surrogate$learner[[1L]]$encapsulation, surrogate$learner[[2L]]$encapsulation) expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback) # singlecrit mixed input @@ -65,7 +65,7 @@ test_that("default_surrogate", { expect_r6(surrogate$learner, "LearnerRegrRanger") expect_equal_sorted(surrogate$learner$param_set$values, list(num.threads = 1L, num.trees = 100L, keep.inbag = TRUE, se.method = "jack")) - expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner$fallback, "LearnerRegrRanger") # twocrit mixed input @@ -74,10 +74,10 @@ test_that("default_surrogate", { expect_list(surrogate$learner, types = "LearnerRegrRanger") expect_equal_sorted(surrogate$learner[[1L]]$param_set$values, list(num.threads = 1L, num.trees = 100L, keep.inbag = TRUE, se.method = "jack")) - expect_equal(surrogate$learner[[1L]]$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner[[1L]]$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner[[1L]]$fallback, "LearnerRegrRanger") expect_equal(surrogate$learner[[1L]]$param_set$values, surrogate$learner[[2L]]$param_set$values) - expect_equal(surrogate$learner[[1L]]$encapsulate, surrogate$learner[[2L]]$encapsulate) + expect_equal(surrogate$learner[[1L]]$encapsulation, surrogate$learner[[2L]]$encapsulation) expect_equal(surrogate$learner[[1L]]$fallback, surrogate$learner[[2L]]$fallback) # singlecrit mixed input deps @@ -152,12 +152,11 @@ test_that("stability and defaults", { # this should trigger a mbo_error instance = MAKE_INST_1D(terminator = trm("evals", n_evals = 5L)) learner = LearnerRegrError$new() - learner$encapsulate[c("train", "predict")] = "evaluate" - learner$fallback = lrn("regr.ranger", num.trees = 10L, keep.inbag = TRUE, se.method = "jack") + learner$encapsulate("evaluate", lrn("regr.ranger", num.trees = 10L, keep.inbag = TRUE, se.method = "jack")) surrogate = default_surrogate(instance, learner = learner, n_learner = 1L) expect_r6(surrogate, "SurrogateLearner") expect_r6(surrogate$learner, "LearnerRegrError") - expect_equal(surrogate$learner$encapsulate, c(train = "evaluate", predict = "evaluate")) + expect_equal(surrogate$learner$encapsulation, c(train = "evaluate", predict = "evaluate")) expect_r6(surrogate$learner$fallback, "LearnerRegrRanger") acq_function = default_acqfunction(instance) expect_r6(acq_function, "AcqFunctionEI") @@ -173,16 +172,16 @@ test_that("stability and defaults", { # Nothing should happen here due to the fallback learner expect_true(sum(grepl("Surrogate Train Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 0L) - acq_function$surrogate$learner$reset() - acq_function$surrogate$learner$fallback = NULL - instance$archive$clear() - bayesopt_ego(instance, surrogate = surrogate, acq_function = acq_function, acq_optimizer = acq_optimizer) - expect_true(nrow(instance$archive$data) == 5L) - lines = readLines(f) - # Training fails but this error is not logged due to the "evaluate" encapsulate - expect_equal(acq_function$surrogate$learner$errors, "Surrogate Train Error.") - expect_true(sum(grepl("Surrogate Train Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 0L) - expect_true(sum(grepl("Cannot predict", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L) - expect_true(sum(grepl("Proposing a randomly sampled point", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L) + # acq_function$surrogate$learner$reset() + # acq_function$surrogate$learner$fallback = NULL + # instance$archive$clear() + # bayesopt_ego(instance, surrogate = surrogate, acq_function = acq_function, acq_optimizer = acq_optimizer) + # expect_true(nrow(instance$archive$data) == 5L) + # lines = readLines(f) + # # Training fails but this error is not logged due to the "evaluate" encapsulate + # expect_equal(acq_function$surrogate$learner$errors, "Surrogate Train Error.") + # expect_true(sum(grepl("Surrogate Train Error", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 0L) + # expect_true(sum(grepl("Cannot predict", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L) + # expect_true(sum(grepl("Proposing a randomly sampled point", unlist(map(strsplit(lines, "\\[bbotk\\] "), 2L)))) == 1L) })