diff --git a/.lintr b/.lintr
index 436e1b52..6fa3c0ff 100644
--- a/.lintr
+++ b/.lintr
@@ -5,5 +5,6 @@ linters: linters_with_defaults(
object_name_linter = object_name_linter(c("snake_case", "CamelCase")), # only allow snake case and camel case object names
cyclocomp_linter = NULL, # do not check function complexity
commented_code_linter = NULL, # allow code in comments
- line_length_linter = line_length_linter(120)
+ line_length_linter = line_length_linter(120),
+ indentation_linter = indentation_linter(indent = 2L, hanging_indent_style = "never") # enforce two-space indentation, no hanging indents
)
diff --git a/DESCRIPTION b/DESCRIPTION
index 48499663..f146ca46 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -41,13 +41,13 @@ BugReports: https://github.com/mlr-org/mlr3mbo/issues
Depends:
R (>= 3.1.0)
Imports:
- bbotk (>= 1.0.0),
+ bbotk (>= 1.1.0),
checkmate (>= 2.0.0),
data.table,
lgr (>= 0.3.4),
- mlr3 (>= 0.14.0),
+ mlr3 (>= 0.20.2.9000),
mlr3misc (>= 0.11.0),
- mlr3tuning (>= 1.0.0),
+ mlr3tuning (>= 1.0.1),
paradox (>= 1.0.0),
spacefillr,
R6 (>= 2.4.1)
@@ -64,9 +64,11 @@ Suggests:
rgenoud,
rmarkdown,
rpart,
+ rush,
stringi,
testthat (>= 3.0.0)
-Remotes: mlr-org/bbotk
+Remotes:
+ mlr-org/mlr3
ByteCompile: no
Encoding: UTF-8
Config/testthat/edition: 3
@@ -90,6 +92,7 @@ Collate:
'AcqFunctionSmsEgo.R'
'AcqOptimizer.R'
'aaa.R'
+ 'OptimizerAsyncMbo.R'
'OptimizerMbo.R'
'mlr_result_assigners.R'
'ResultAssigner.R'
@@ -97,7 +100,9 @@ Collate:
'ResultAssignerSurrogate.R'
'Surrogate.R'
'SurrogateLearner.R'
+ 'SurrogateLearnerAsync.R'
'SurrogateLearnerCollection.R'
+ 'TunerAsyncMbo.R'
'TunerMbo.R'
'mlr_loop_functions.R'
'bayesopt_ego.R'
@@ -109,6 +114,7 @@ Collate:
'helper.R'
'loop_function.R'
'mbo_defaults.R'
+ 'mlr_callbacks.R'
'sugar.R'
'zzz.R'
VignetteBuilder: knitr
diff --git a/NAMESPACE b/NAMESPACE
index 9b8e3b96..a12ea3ba 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -17,13 +17,16 @@ export(AcqFunctionPI)
export(AcqFunctionSD)
export(AcqFunctionSmsEgo)
export(AcqOptimizer)
+export(OptimizerAsyncMbo)
export(OptimizerMbo)
export(ResultAssigner)
export(ResultAssignerArchive)
export(ResultAssignerSurrogate)
export(Surrogate)
export(SurrogateLearner)
+export(SurrogateLearnerAsync)
export(SurrogateLearnerCollection)
+export(TunerAsyncMbo)
export(TunerMbo)
export(acqf)
export(acqfs)
@@ -58,6 +61,7 @@ importFrom(R6,R6Class)
importFrom(stats,dnorm)
importFrom(stats,pnorm)
importFrom(stats,quantile)
+importFrom(stats,rexp)
importFrom(stats,runif)
importFrom(stats,setNames)
importFrom(utils,bibentry)
diff --git a/R/AcqFunctionCB.R b/R/AcqFunctionCB.R
index 81328e05..358d62f6 100644
--- a/R/AcqFunctionCB.R
+++ b/R/AcqFunctionCB.R
@@ -64,17 +64,25 @@ AcqFunctionCB = R6Class("AcqFunctionCB",
assert_r6(surrogate, "SurrogateLearner", null.ok = TRUE)
assert_number(lambda, lower = 0, finite = TRUE)
- constants = ps(lambda = p_dbl(lower = 0, default = 2))
+ constants = ps(
+ lambda = p_dbl(lower = 0, default = 2)
+ )
constants$values$lambda = lambda
- super$initialize("acq_cb", constants = constants, surrogate = surrogate, requires_predict_type_se = TRUE, direction = "same", label = "Lower / Upper Confidence Bound", man = "mlr3mbo::mlr_acqfunctions_cb")
+ super$initialize("acq_cb",
+ constants = constants,
+ surrogate = surrogate,
+ requires_predict_type_se = TRUE,
+ direction = "same",
+ label = "Lower / Upper Confidence Bound",
+ man = "mlr3mbo::mlr_acqfunctions_cb")
}
),
private = list(
- .fun = function(xdt, ...) {
- constants = list(...)
- lambda = constants$lambda
+ .fun = function(xdt, lambda) {
p = self$surrogate$predict(xdt)
cb = p$mean - self$surrogate_max_to_min * lambda * p$se
data.table(acq_cb = cb)
diff --git a/R/OptimizerAsyncMbo.R b/R/OptimizerAsyncMbo.R
new file mode 100644
index 00000000..2a9c67f9
--- /dev/null
+++ b/R/OptimizerAsyncMbo.R
@@ -0,0 +1,203 @@
+#' @title Asynchronous Model Based Optimization
+#'
+#' @name mlr_optimizers_async_mbo
+#'
+#' @description
+#' `OptimizerAsyncMbo` class that implements asynchronous Model Based Optimization (MBO).
+#'
+#' @section Parameters:
+#' \describe{
+#' \item{`initial_design`}{`data.table::data.table()`\cr
+#' Initial design of the optimization.
+#' If `NULL`, a design of size `design_size` is generated with `design_function`.}
+#' \item{`design_size`}{`integer(1)`\cr
+#' Size of the initial design.}
+#' \item{`design_function`}{`character(1)`\cr
+#' Function to generate the initial design.
+#' One of `c("random", "sobol", "lhs")`.}
+#' \item{`n_workers`}{`integer(1)`\cr
+#' Number of parallel workers.
+#' If `NULL`, all rush workers set with [rush::rush_plan()] are used.}
+#' }
+#'
+#' @template param_surrogate
+#' @template param_acq_function
+#' @template param_acq_optimizer
+#'
+#' @param param_set ([paradox::ParamSet])\cr
+#' Set of control parameters.
+#'
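+#' @examples
+#' # Minimal usage sketch: assumes a reachable Redis instance configured via
+#' # rush::rush_plan(); the objective function is illustrative only.
+#' \dontrun{
+#' library(bbotk)
+#'
+#' rush::rush_plan(n_workers = 2)
+#'
+#' objective = ObjectiveRFun$new(
+#'   fun = function(xs) list(y = xs$x^2),
+#'   domain = ps(x = p_dbl(lower = -5, upper = 5)),
+#'   codomain = ps(y = p_dbl(tags = "minimize"))
+#' )
+#' instance = oi_async(
+#'   objective = objective,
+#'   terminator = trm("evals", n_evals = 20)
+#' )
+#' optimizer = opt("async_mbo", design_size = 4)
+#' optimizer$optimize(instance)
+#' }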
+#' @export
+OptimizerAsyncMbo = R6Class("OptimizerAsyncMbo",
+ inherit = OptimizerAsync,
+
+ public = list(
+
+ #' @description
+ #' Creates a new instance of this [R6][R6::R6Class] class.
+ initialize = function(surrogate = NULL, acq_function = NULL, acq_optimizer = NULL, param_set = NULL) {
+ default_param_set = ps(
+ initial_design = p_uty(),
+ design_size = p_int(lower = 1, default = 10),
+ design_function = p_fct(c("random", "sobol", "lhs"), default = "sobol"),
+ n_workers = p_int(lower = 1L)
+ )
+ param_set = c(default_param_set, param_set)
+
+ param_set$set_values(design_size = 10, design_function = "sobol")
+
+ super$initialize("async_mbo",
+ param_set = param_set,
+ param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"),
+ properties = c("dependencies", "single-crit"),
+ packages = c("mlr3mbo", "rush"),
+ label = "Asynchronous Model Based Optimization",
+ man = "mlr3mbo::mlr_optimizers_async_mbo")
+
+ self$surrogate = assert_r6(surrogate, classes = "Surrogate", null.ok = TRUE)
+ self$acq_function = assert_r6(acq_function, classes = "AcqFunction", null.ok = TRUE)
+ self$acq_optimizer = assert_r6(acq_optimizer, classes = "AcqOptimizer", null.ok = TRUE)
+ },
+
+
+ #' @description
+#' Performs the optimization on an [OptimInstanceAsyncSingleCrit] or [OptimInstanceAsyncMultiCrit] until termination.
+#' Each evaluation is written into the [ArchiveAsync].
+#' The result is written into the instance object.
+ #'
+ #' @param inst ([OptimInstanceAsyncSingleCrit] | [OptimInstanceAsyncMultiCrit]).
+ #'
+ #' @return [data.table::data.table()]
+ optimize = function(inst) {
+ pv = self$param_set$values
+
+ # initial design
+ design = if (is.null(pv$initial_design)) {
+ # generate initial design
+ generate_design = switch(pv$design_function,
+ "random" = generate_design_random,
+ "sobol" = generate_design_sobol,
+ "lhs" = generate_design_lhs)
+
+ lg$debug("Generating %s design with size %s", pv$design_function, pv$design_size)
+ generate_design(inst$search_space, n = pv$design_size)$data
+ } else {
+ # use provided initial design
+ lg$debug("Using provided initial design with size %s", nrow(pv$initial_design))
+ pv$initial_design
+ }
+ optimize_async_default(inst, self, design, n_workers = pv$n_workers)
+ }
+ ),
+
+ active = list(
+ #' @template field_surrogate
+ surrogate = function(rhs) {
+ if (missing(rhs)) {
+ private$.surrogate
+ } else {
+ private$.surrogate = assert_r6(rhs, classes = "Surrogate", null.ok = TRUE)
+ }
+ },
+
+ #' @template field_acq_function
+ acq_function = function(rhs) {
+ if (missing(rhs)) {
+ private$.acq_function
+ } else {
+ private$.acq_function = assert_r6(rhs, classes = "AcqFunction", null.ok = TRUE)
+ }
+ },
+
+ #' @template field_acq_optimizer
+ acq_optimizer = function(rhs) {
+ if (missing(rhs)) {
+ private$.acq_optimizer
+ } else {
+ private$.acq_optimizer = assert_r6(rhs, classes = "AcqOptimizer", null.ok = TRUE)
+ }
+ },
+
+ #' @template field_param_classes
+ param_classes = function(rhs) {
+ assert_ro_binding(rhs)
+ param_classes_surrogate = c("logical" = "ParamLgl", "integer" = "ParamInt", "numeric" = "ParamDbl", "factor" = "ParamFct")
+ if (!is.null(self$surrogate)) {
+ param_classes_surrogate = param_classes_surrogate[c("logical", "integer", "numeric", "factor") %in% self$surrogate$feature_types] # surrogate has precedence over acq_function$surrogate
+ }
+ param_classes_acq_opt = if (!is.null(self$acq_optimizer)) {
+ self$acq_optimizer$optimizer$param_classes
+ } else {
+ c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct")
+ }
+ unname(intersect(param_classes_surrogate, param_classes_acq_opt))
+ },
+
+ #' @template field_properties
+ properties = function(rhs) {
+ assert_ro_binding(rhs)
+
+ properties_surrogate = "dependencies"
+ if (!is.null(self$surrogate)) {
+ if ("missings" %nin% self$surrogate$properties) {
+ properties_surrogate = character()
+ }
+ }
+ unname(c(properties_surrogate))
+ },
+
+ #' @template field_packages
+ packages = function(rhs) {
+ assert_ro_binding(rhs)
+ union("mlr3mbo", c(self$acq_function$packages, self$surrogate$packages, self$acq_optimizer$optimizer$packages))
+ }
+ ),
+
+ private = list(
+ .surrogate = NULL,
+ .acq_function = NULL,
+ .acq_optimizer = NULL,
+
+ .optimize = function(inst) {
+ pv = self$param_set$values
+ search_space = inst$search_space
+ archive = inst$archive
+
+ if (is.null(self$acq_function)) {
+ self$acq_function = self$acq_optimizer$acq_function %??% default_acqfunction(inst)
+ }
+
+ if (is.null(self$surrogate)) {
+ self$surrogate = self$acq_function$surrogate %??% default_surrogate(inst)
+ }
+
+ if (is.null(self$acq_optimizer)) {
+ self$acq_optimizer = default_acqoptimizer(self$acq_function)
+ }
+
+ self$surrogate$archive = inst$archive
+ self$acq_function$surrogate = self$surrogate
+ self$acq_optimizer$acq_function = self$acq_function
+
+ lg$debug("Optimizer '%s' evaluates the initial design", self$id)
+ get_private(inst)$.eval_queue()
+
+ lg$debug("Optimizer '%s' starts the tuning phase", self$id)
+
+ # actual loop
+ while (!inst$is_terminated) {
+ # sample
+ self$acq_function$surrogate$update()
+ self$acq_function$update()
+ xdt = self$acq_optimizer$optimize()
+ xs = transpose_list(xdt)[[1]]
+
+ # eval
+ get_private(inst)$.eval_point(xs)
+ }
+ }
+ )
+)
+
+#' @include aaa.R
+optimizers[["async_mbo"]] = OptimizerAsyncMbo
diff --git a/R/SurrogateLearnerAsync.R b/R/SurrogateLearnerAsync.R
new file mode 100644
index 00000000..584f463b
--- /dev/null
+++ b/R/SurrogateLearnerAsync.R
@@ -0,0 +1,87 @@
+#' @title Surrogate Model Containing a Single Learner
+#'
+#' @description
+#' Surrogate model containing a single [mlr3::LearnerRegr].
+#'
+#' @section Parameters:
+#' \describe{
+#' \item{`assert_insample_perf`}{`logical(1)`\cr
+#' Should the insample performance of the [mlr3::LearnerRegr] be asserted after updating the surrogate?
+#' If the assertion fails (i.e., the insample performance based on the `perf_measure` does not meet the
+#' `perf_threshold`), an error is thrown.
+#' Default is `FALSE`.
+#' }
+#' \item{`perf_measure`}{[mlr3::MeasureRegr]\cr
+#' Performance measure which should be used to assert the insample performance of the [mlr3::LearnerRegr].
+#' Only relevant if `assert_insample_perf = TRUE`.
+#' Default is [mlr3::mlr_measures_regr.rsq].
+#' }
+#' \item{`perf_threshold`}{`numeric(1)`\cr
+#' Threshold the insample performance of the [mlr3::LearnerRegr] should be asserted against.
+#' Only relevant if `assert_insample_perf = TRUE`.
+#' Default is `0`.
+#' }
+#' \item{`catch_errors`}{`logical(1)`\cr
+#' Should errors during updating the surrogate be caught and propagated to the `loop_function` which can then handle
+#' the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation?
+#' Default is `TRUE`.
+#' }
+#' \item{`impute_method`}{`character(1)`\cr
+#' Method used to impute the objective values of queued and running points when updating the surrogate.
+#' One of `c("mean", "random")`.
+#' Default is `"random"`.
+#' }
+#' }
+#'
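+#' @examples
+#' # Construction sketch: in practice this surrogate is created internally,
+#' # e.g. by default_surrogate() on an asynchronous optimization instance.
+#' \dontrun{
+#' surrogate = SurrogateLearnerAsync$new(lrn("regr.rpart"))
+#' surrogate$param_set$set_values(impute_method = "mean")
+#' }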
+#' @export
+SurrogateLearnerAsync = R6Class("SurrogateLearnerAsync",
+ inherit = SurrogateLearner,
+
+ public = list(
+
+ #' @description
+ #' Creates a new instance of this [R6][R6::R6Class] class.
+ #'
+ #' @param learner ([mlr3::LearnerRegr]).
+ #' @template param_archive_surrogate
+ #' @template param_col_y_surrogate
+ #' @template param_cols_x_surrogate
+ initialize = function(learner, archive = NULL, cols_x = NULL, col_y = NULL) {
+ super$initialize(learner = learner, archive = archive, cols_x = cols_x, col_y = col_y)
+
+ extra_ps = ps(
+ impute_method = p_fct(c("mean", "random"), default = "random")
+ )
+
+ private$.param_set = c(private$.param_set, extra_ps)
+ private$.param_set$set_values(impute_method = "random")
+ }
+ ),
+
+ private = list(
+ .update = function() {
+ xydt = self$archive$rush$fetch_tasks_with_state(states = c("queued", "running", "finished"))[, c(self$cols_x, self$cols_y, "state"), with = FALSE]
+
+ if (self$param_set$values$impute_method == "mean") {
+ setnafill(xydt, type = "const", fill = mean(xydt[[self$cols_y]], na.rm = TRUE), cols = self$cols_y)
+ } else if (self$param_set$values$impute_method == "random") {
+ walk(self$cols_y, function(col) {
+ min = min(xydt[[col]], na.rm = TRUE)
+ max = max(xydt[[col]], na.rm = TRUE)
+ xydt[c("queued", "running"), (col) := runif(.N, min, max), on = "state"]
+ })
+ }
+ set(xydt, j = "state", value = NULL)
+
+ task = TaskRegr$new(id = "surrogate_task", backend = xydt, target = self$cols_y)
+ assert_learnable(task, learner = self$learner)
+ self$learner$train(task)
+
+ if (self$param_set$values$assert_insample_perf) {
+ measure = assert_measure(self$param_set$values$perf_measure %??% mlr_measures$get("regr.rsq"), task = task, learner = self$learner)
+ private$.insample_perf = self$learner$predict(task)$score(measure, task = task, learner = self$learner)
+ self$assert_insample_perf
+ }
+ }
+ )
+)
+
diff --git a/R/TunerAsyncMbo.R b/R/TunerAsyncMbo.R
new file mode 100644
index 00000000..70afae28
--- /dev/null
+++ b/R/TunerAsyncMbo.R
@@ -0,0 +1,57 @@
+#' @title Asynchronous Model Based Tuning
+#'
+#' @include OptimizerAsyncMbo.R
+#' @name mlr_tuners_async_mbo
+#'
+#' @description
+#' `TunerAsyncMbo` class that implements asynchronous Model Based Tuning, wrapping [OptimizerAsyncMbo].
+#'
+#' @section Parameters:
+#' \describe{
+#' \item{`initial_design`}{`data.table::data.table()`\cr
+#' Initial design of the optimization.
+#' If `NULL`, a design of size `design_size` is generated with `design_function`.}
+#' \item{`design_size`}{`integer(1)`\cr
+#' Size of the initial design.}
+#' \item{`design_function`}{`character(1)`\cr
+#' Function to generate the initial design.
+#' One of `c("random", "sobol", "lhs")`.}
+#' \item{`n_workers`}{`integer(1)`\cr
+#' Number of parallel workers.
+#' If `NULL`, all rush workers set with [rush::rush_plan()] are used.}
+#' }
+#'
+#' @template param_surrogate
+#' @template param_acq_function
+#' @template param_acq_optimizer
+#'
+#' @param param_set ([paradox::ParamSet])\cr
+#' Set of control parameters.
+#'
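+#' @examples
+#' # Tuning sketch mirroring the tests: assumes a reachable Redis instance
+#' # configured via rush::rush_plan(); task and learner are illustrative.
+#' \dontrun{
+#' library(mlr3tuning)
+#'
+#' rush::rush_plan(n_workers = 2)
+#'
+#' instance = ti_async(
+#'   task = tsk("pima"),
+#'   learner = lrn("classif.rpart", cp = to_tune(1e-04, 1e-1)),
+#'   resampling = rsmp("cv", folds = 3),
+#'   measure = msr("classif.ce"),
+#'   terminator = trm("evals", n_evals = 20)
+#' )
+#' tuner = tnr("async_mbo", design_size = 4)
+#' tuner$optimize(instance)
+#' }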
+#' @export
+TunerAsyncMbo = R6Class("TunerAsyncMbo",
+ inherit = mlr3tuning::TunerAsyncFromOptimizerAsync,
+ public = list(
+
+ #' @description
+ #' Creates a new instance of this [R6][R6::R6Class] class.
+ initialize = function(surrogate = NULL, acq_function = NULL, acq_optimizer = NULL, param_set = NULL) {
+ optimizer = OptimizerAsyncMbo$new(
+ surrogate = surrogate,
+ acq_function = acq_function,
+ acq_optimizer = acq_optimizer,
+ param_set = param_set
+ )
+
+ super$initialize(
+ optimizer = optimizer,
+ man = "mlr3mbo::mlr_tuners_async_mbo"
+ )
+ }
+ )
+)
+
+mlr_tuners$add("async_mbo", TunerAsyncMbo)
+
+#' @include aaa.R
+tuners[["async_mbo"]] = TunerAsyncMbo
diff --git a/R/aaa.R b/R/aaa.R
index ed02c255..4a4686c6 100644
--- a/R/aaa.R
+++ b/R/aaa.R
@@ -1,2 +1,3 @@
optimizers = list()
tuners = list()
+callbacks = list()
diff --git a/R/mbo_defaults.R b/R/mbo_defaults.R
index 69c9664d..ccceb89b 100644
--- a/R/mbo_defaults.R
+++ b/R/mbo_defaults.R
@@ -161,14 +161,13 @@ default_surrogate = function(instance, learner = NULL, n_learner = NULL) {
default_rf(noisy)
}
# stability: evaluate and add a fallback
- learner$encapsulate[c("train", "predict")] = "evaluate"
require_namespaces("ranger")
fallback = mlr3learners::LearnerRegrRanger$new()
fallback$param_set$values = insert_named(
fallback$param_set$values,
list(num.trees = 10L, keep.inbag = TRUE, se.method = "jack")
)
- learner$fallback = fallback
+ learner$encapsulate("evaluate", fallback)
if (has_deps) {
require_namespaces("mlr3pipelines")
@@ -184,14 +183,15 @@ default_surrogate = function(instance, learner = NULL, n_learner = NULL) {
learner
)
)
- learner$encapsulate[c("train", "predict")] = "evaluate"
- learner$fallback = LearnerRegrFeatureless$new()
+ learner$encapsulate("evaluate", lrn("regr.featureless"))
}
}
if (is.null(n_learner)) n_learner = length(instance$archive$cols_y)
- if (n_learner == 1L) {
+ if (n_learner == 1L && inherits(instance, "OptimInstanceBatch")) {
SurrogateLearner$new(learner)
+ } else if (n_learner == 1L && inherits(instance, "OptimInstanceAsync")) {
+ SurrogateLearnerAsync$new(learner)
} else {
learners = replicate(n_learner, learner$clone(deep = TRUE), simplify = FALSE)
SurrogateLearnerCollection$new(learners)
@@ -213,6 +213,8 @@ default_acqfunction = function(instance) {
assert_r6(instance, classes = "OptimInstance")
if (inherits(instance, "OptimInstanceBatchSingleCrit")) {
AcqFunctionEI$new()
+ } else if (inherits(instance, "OptimInstanceAsyncSingleCrit")) {
+ AcqFunctionCB$new()
} else if (inherits(instance, "OptimInstanceBatchMultiCrit")) {
AcqFunctionSmsEgo$new()
}
diff --git a/R/mlr_callbacks.R b/R/mlr_callbacks.R
new file mode 100644
index 00000000..51c8de61
--- /dev/null
+++ b/R/mlr_callbacks.R
@@ -0,0 +1,177 @@
+#' @title Sample Lambda from a Uniform Distribution
+#'
+#' @name mlr3mbo.sample_lambda_uniform
+#'
+#' @description
+#' This [CallbackAsyncTuning] samples the lambda parameter of the confidence bounds acquisition function.
+#' The lambda value is drawn from a uniform distribution with `min_lambda` and `max_lambda` as bounds.
+#'
+#' @param min_lambda (`numeric(1)`)\cr
+#' Minimum value of the lambda parameter.
+#' Defaults to `0.01`.
+#' @param max_lambda (`numeric(1)`)\cr
+#' Maximum value of the lambda parameter.
+#' Defaults to `10`.
+#'
+#' @examples
+#' clbk("mlr3mbo.sample_lambda", min_lambda = 0.01, max_lambda = 10)
+NULL
+
+load_callback_sample_lambda_uniform = function() {
+ callback_async_tuning("mlr3mbo.sample_lambda_uniform",
+ label = "Sample Lambda Uniform",
+ man = "mlr3mbo::mlr3mbo.sample_lambda_uniform",
+
+ on_optimization_begin = function(callback, context) {
+ assert_class(context$optimizer$acq_function, "AcqFunctionCB")
+ callback$state$min_lambda = assert_number(callback$state$min_lambda, lower = 0, null.ok = TRUE) %??% 0.01
+ callback$state$max_lambda = assert_number(callback$state$max_lambda, lower = 0, null.ok = TRUE) %??% 10
+ },
+
+ on_worker_begin = function(callback, context) {
+ callback$state$lambda = runif(1, min = callback$state$min_lambda, max = callback$state$max_lambda)
+ context$optimizer$acq_function$constants$set_values(lambda = callback$state$lambda)
+ },
+
+ on_optimizer_after_eval = function(callback, context) {
+ context$ys = c(context$ys, list(lambda = callback$state$lambda))
+ }
+ )
+}
+
+callbacks[["mlr3mbo.sample_lambda_uniform"]] = load_callback_sample_lambda_uniform
+
+#' @title Sample Lambda from an Exponential Distribution
+#'
+#' @name mlr3mbo.sample_lambda_exponential
+#'
+#' @description
+#' This [CallbackAsyncTuning] samples the lambda parameter of the confidence bounds acquisition function.
+#' The lambda value is drawn from an exponential distribution with rate `1 / lambda`.
+#'
+#' @examples
+#' clbk("mlr3mbo.sample_lambda_exponential")
+NULL
+
+load_callback_sample_lambda_exponential = function() {
+ callback_async_tuning("mlr3mbo.sample_lambda_exponential",
+ label = "Sample Lambda Exponential",
+ man = "mlr3mbo::mlr3mbo.sample_lambda_exponential",
+
+ on_optimization_begin = function(callback, context) {
+ assert_class(context$optimizer$acq_function, "AcqFunctionCB")
+ },
+
+ on_worker_begin = function(callback, context) {
+ lambda = context$optimizer$acq_function$constants$values$lambda
+ callback$state$lambda = rexp(1, 1 / lambda)
+ context$optimizer$acq_function$constants$set_values(lambda = callback$state$lambda)
+ },
+
+ on_optimizer_after_eval = function(callback, context) {
+ context$ys = c(context$ys, list(lambda = callback$state$lambda))
+ }
+ )
+}
+
+callbacks[["mlr3mbo.sample_lambda_exponential"]] = load_callback_sample_lambda_exponential
+
+#' @title Exponential Decay of Lambda
+#'
+#' @name mlr3mbo.exponential_lambda_decay
+#'
+#' @description
+#' This [CallbackAsyncTuning] decays the lambda parameter of the confidence bounds acquisition function.
+#' The initial lambda value is drawn from an exponential distribution with rate `1 / lambda`.
+#' The lambda value is updated after each evaluation by the formula `lambda_0 * exp(-rate * (t %% period))`.
+#'
+#' @param rate (`numeric(1)`)\cr
+#' Rate of the exponential decay.
+#' Defaults to `0.1`.
+#' @param period (`integer(1)`)\cr
+#' Period of the exponential decay.
+#' Defaults to `25`.
+#'
+#' @examples
+#' clbk("mlr3mbo.exponential_lambda_decay", rate = 0.1, period = 25)
+NULL
+
+load_callback_exponential_lambda_decay = function() {
+ callback_async_tuning("mlr3mbo.exponential_lambda_decay",
+ label = "Exponential Decay of Lambda",
+ man = "mlr3mbo::mlr3mbo.exponential_lambda_decay",
+
+ on_optimization_begin = function(callback, context) {
+ assert_class(context$optimizer$acq_function, "AcqFunctionCB")
+ callback$state$rate = assert_number(callback$state$rate, lower = 0, null.ok = TRUE) %??% 0.1
+ callback$state$period = assert_integer(callback$state$period, lower = 1, null.ok = TRUE) %??% 25
+ callback$state$t = 0
+ },
+
+ on_worker_begin = function(callback, context) {
+ lambda = context$optimizer$acq_function$constants$values$lambda
+ callback$state$lambda_0 = rexp(1, 1 / lambda)
+ context$optimizer$acq_function$constants$set_values(lambda = callback$state$lambda_0)
+ },
+
+ on_optimizer_after_eval = function(callback, context) {
+ if (!is.null(context$extra)) { # skip initial design
+ lambda_0 = callback$state$lambda_0
+ t = callback$state$t
+ lambda = lambda_0 * exp(-callback$state$rate * (t %% callback$state$period))
+ callback$state$t = t + 1
+ context$optimizer$acq_function$constants$set_values(lambda = lambda)
+ context$ys = c(context$ys, list(lambda_0 = callback$state$lambda_0, lambda = lambda))
+ }
+ }
+ )
+}
+
+callbacks[["mlr3mbo.exponential_lambda_decay"]] = load_callback_exponential_lambda_decay
+
+#' @title Epsilon Decay
+#'
+#' @name mlr3mbo.epsilon_decay
+#'
+#' @description
+#' This [CallbackAsyncTuning] decays the epsilon parameter of the expected improvement acquisition function.
+#' The epsilon value is updated after each evaluation by the formula `epsilon_0 * exp(-rate * (t %% period))`.
+#'
+#' @param rate (`numeric(1)`)\cr
+#' Rate of the exponential decay.
+#' Defaults to `0.1`.
+#' @param period (`integer(1)`)\cr
+#' Period of the exponential decay.
+#' Defaults to `25`.
+#' @examples
+#' clbk("mlr3mbo.sample_epsilon", rate = 0.1, period = 25)
+NULL
+
+load_callback_epsilon_decay = function() {
+ callback_async_tuning("mlr3mbo.epsilon_decay",
+ label = "Epsilon Decay",
+ man = "mlr3mbo::mlr3mbo.epsilon_decay",
+
+ on_optimization_begin = function(callback, context) {
+ assert_class(context$optimizer$acq_function, "AcqFunctionEI")
+ epsilon = context$optimizer$acq_function$constants$values$epsilon
+ callback$state$epsilon_0 = epsilon
+ callback$state$epsilon = epsilon
+ callback$state$rate = assert_number(callback$state$rate, lower = 0, null.ok = TRUE) %??% 0.1
+ callback$state$period = assert_number(callback$state$period, lower = 1, null.ok = TRUE) %??% 25
+ callback$state$t = 0
+ },
+
+ on_optimizer_after_eval = function(callback, context) {
+ if (!is.null(context$extra)) { # skip initial design
+ t = callback$state$t
+ epsilon_0 = callback$state$epsilon_0
+ epsilon = epsilon_0 * exp(-callback$state$rate * (t %% callback$state$period))
+ callback$state$t = t + 1
+ context$optimizer$acq_function$constants$set_values(epsilon = epsilon)
+ context$ys = c(context$ys, list(epsilon_0 = epsilon_0, epsilon = epsilon))
+ }
+ }
+ )
+}
+
+callbacks[["mlr3mbo.epsilon_decay"]] = load_callback_epsilon_decay
diff --git a/R/zzz.R b/R/zzz.R
index 003b52c1..62d0e570 100644
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -9,7 +9,7 @@
#' @import lgr
#' @import mlr3
#' @import mlr3tuning
-#' @importFrom stats setNames runif dnorm pnorm quantile
+#' @importFrom stats setNames runif dnorm pnorm quantile rexp
#' @useDynLib mlr3mbo c_sms_indicator c_eps_indicator
"_PACKAGE"
@@ -25,10 +25,17 @@ register_mlr3tuning = function() {
iwalk(tuners, function(obj, nm) x$add(nm, obj))
} # nocov end
+register_mlr3misc = function() {
+ # nocov start
+ x = utils::getFromNamespace("mlr_callbacks", ns = "mlr3misc")
+ iwalk(callbacks, function(obj, nm) x$add(nm, obj))
+} # nocov end
+
.onLoad = function(libname, pkgname) { # nolint
# nocov start
register_namespace_callback(pkgname, "bbotk", register_bbotk)
register_namespace_callback(pkgname, "mlr3tuning", register_mlr3tuning)
+ register_namespace_callback(pkgname, "mlr3misc", register_mlr3misc)
assign("lg", lgr::get_logger("bbotk"), envir = parent.env(environment()))
@@ -41,6 +48,7 @@ register_mlr3tuning = function() {
# nocov start
walk(names(optimizers), function(id) bbotk::mlr_optimizers$remove(id))
walk(names(tuners), function(id) mlr3tuning::mlr_tuners$remove(id))
+ walk(names(callbacks), function(id) mlr3misc::mlr_callbacks$remove(id))
} # nocov end
# static code checks should not complain about commonly used data.table columns
diff --git a/attic/OptimizerADBO.R b/attic/OptimizerADBO.R
new file mode 100644
index 00000000..3cdd4754
--- /dev/null
+++ b/attic/OptimizerADBO.R
@@ -0,0 +1,152 @@
+#' @title Asynchronous Decentralized Bayesian Optimization
+#' @name mlr_optimizers_adbo
+#'
+#' @description
+#' Asynchronous Decentralized Bayesian Optimization (ADBO).
+#'
+#' @note
+#' The \eqn{\lambda} parameter of the upper confidence bound acquisition function controls the trade-off between exploration and exploitation.
+#' A large \eqn{\lambda} value leads to more exploration, while a small \eqn{\lambda} value leads to more exploitation.
+#' ADBO can use periodic exponential decay to repeatedly reduce \eqn{\lambda}, driving the optimization toward exploitation before \eqn{\lambda} is reset to its initial value.
+#'
+#' @section Parameters:
+#' \describe{
+#' \item{`lambda`}{`numeric(1)`\cr
+#' \eqn{\lambda} value used for the confidence bound.
+#' Defaults to `1.96`.}
+#' \item{`exponential_decay`}{`logical(1)`\cr
+#' Whether to use periodic exponential decay for \eqn{\lambda}.}
+#' \item{`rate`}{`numeric(1)`\cr
+#' Rate of the exponential decay.}
+#' \item{`period`}{`integer(1)`\cr
+#' Period of the exponential decay.}
+#' \item{`design_size`}{`integer(1)`\cr
+#' Size of the initial design.}
+#' \item{`initial_design`}{`data.table`\cr
+#' Initial design.}
+#' \item{`impute_method`}{`character(1)`\cr
+#' Imputation method for missing values in the surrogate model.}
+#' \item{`n_workers`}{`integer(1)`\cr
+#' Number of workers to use.
+#' Defaults to the number of workers set by `rush::rush_plan()`.}
+#' }
+#'
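+#' @examples
+#' # Usage sketch (attic code, not part of the package build): assumes this
+#' # file has been sourced, a Redis instance is reachable via rush, and
+#' # OBJ_2D / PS_2D are the illustrative test objective and search space.
+#' \dontrun{
+#' rush::rush_plan(n_workers = 2)
+#'
+#' instance = oi_async(
+#'   objective = OBJ_2D,
+#'   search_space = PS_2D,
+#'   terminator = trm("evals", n_evals = 100)
+#' )
+#' optimizer = opt("adbo", design_size = 4)
+#' optimizer$optimize(instance)
+#' }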
+#' @export
+OptimizerADBO = R6Class("OptimizerADBO",
+ inherit = OptimizerAsyncMbo,
+
+ public = list(
+
+ #' @description
+ #' Creates a new instance of this [R6][R6::R6Class] class.
+ initialize = function() {
+ param_set = ps(
+ lambda = p_dbl(lower = 0, default = 1.96),
+ exponential_decay = p_lgl(default = TRUE),
+ rate = p_dbl(lower = 0, default = 0.1),
+ period = p_int(lower = 1L, default = 25L),
+ design_size = p_int(lower = 1L),
+ initial_design = p_uty(),
+ impute_method = p_fct(c("mean", "random"), default = "random"),
+ n_workers = p_int(lower = 1L, default = NULL, special_vals = list(NULL))
+ )
+
+ param_set$set_values(lambda = 1.96, exponential_decay = TRUE, rate = 0.1, period = 25L, design_size = 1L, impute_method = "random")
+
+ super$initialize("adbo",
+ param_set = param_set,
+ param_classes = c("ParamLgl", "ParamInt", "ParamDbl", "ParamFct"),
+ properties = c("dependencies", "single-crit"),
+ packages = "mlr3mbo",
+ label = "Asynchronous Decentralized Bayesian Optimization",
+ man = "mlr3mbo::mlr_optimizers_adbo")
+ },
+
+
+ #' @description
+#' Performs the optimization on an [OptimInstanceAsyncSingleCrit] or [OptimInstanceAsyncMultiCrit] until termination.
+#' Each evaluation is written into the [ArchiveAsync].
+#' The result is written into the instance object.
+ #'
+ #' @param inst ([OptimInstanceAsyncSingleCrit] | [OptimInstanceAsyncMultiCrit]).
+ #'
+ #' @return [data.table::data.table()]
+ optimize = function(inst) {
+ pv = self$param_set$values
+
+ # initial design
+ design = if (is.null(pv$initial_design)) {
+
+ lg$debug("Generating sobol design with size %s", pv$design_size)
+ generate_design_sobol(inst$search_space, n = pv$design_size)$data
+ } else {
+
+ lg$debug("Using provided initial design with size %s", nrow(pv$initial_design))
+ pv$initial_design
+ }
+
+ optimize_async_default(inst, self, design, n_workers = pv$n_workers)
+ }
+ ),
+
+ private = list(
+
+ .optimize = function(inst) {
+ pv = self$param_set$values
+ search_space = inst$search_space
+ archive = inst$archive
+
+ # sample lambda from exponential distribution
+ lambda_0 = rexp(1, 1 / pv$lambda)
+ t = 0
+
+ surrogate = default_surrogate(inst)
+ surrogate$param_set$set_values(impute_method = pv$impute_method)
+ acq_function = acqf("cb", lambda = runif(1, 1, 3))
+ acq_optimizer = acqo(opt("random_search", batch_size = 1000L), terminator = trm("evals", n_evals = 10000L))
+ surrogate$archive = inst$archive
+ acq_function$surrogate = surrogate
+ acq_optimizer$acq_function = acq_function
+
+ lg$debug("Optimizer '%s' evaluates the initial design", self$id)
+ evaluate_queue_default(inst)
+
+ lg$debug("Optimizer '%s' starts the tuning phase", self$id)
+
+ # actual loop
+ while (!inst$is_terminated) {
+
+ # decrease lambda
+ if (pv$exponential_decay) {
+ lambda = lambda_0 * exp(-pv$rate * (t %% pv$period))
+ t = t + 1
+ } else {
+ lambda = pv$lambda
+ }
+
+ # sample
+ acq_function$constants$set_values(lambda = lambda)
+ acq_function$surrogate$update()
+ acq_function$update()
+ xdt = acq_optimizer$optimize()
+
+ # transpose point
+ xss = transpose_list(xdt)
+ xs = xss[[1]][inst$archive$cols_x]
+ lg$trace("Optimizer '%s' draws %s", self$id, as_short_string(xs))
+ xs_trafoed = trafo_xs(xs, search_space)
+
+ # eval
+ key = archive$push_running_point(xs)
+ ys = inst$objective$eval(xs_trafoed)
+
+ # push result
+ extra = c(xss[[1]][c("acq_cb", ".already_evaluated")], list(lambda_0 = lambda_0, lambda = lambda))
+ archive$push_result(key, ys, x_domain = xs_trafoed, extra = extra)
+ }
+ }
+ )
+)
+
+#' @include aaa.R
+optimizers[["adbo"]] = OptimizerADBO
diff --git a/attic/TunerADBO.R b/attic/TunerADBO.R
new file mode 100644
index 00000000..6c89074f
--- /dev/null
+++ b/attic/TunerADBO.R
@@ -0,0 +1,48 @@
+#' @title Asynchronous Decentralized Bayesian Optimization
+#' @name mlr_tuners_adbo
+#'
+#' @description
+#' Asynchronous Decentralized Bayesian Optimization (ADBO).
+#'
+#' @note
+#' The \eqn{\lambda} parameter of the upper confidence bound acquisition function controls the trade-off between exploration and exploitation.
+#' A large \eqn{\lambda} value leads to more exploration, while a small \eqn{\lambda} value leads to more exploitation.
+#' ADBO can use periodic exponential decay to repeatedly reduce \eqn{\lambda}, driving the optimization toward exploitation before \eqn{\lambda} is reset to its initial value.
+#'
+#' @section Parameters:
+#' \describe{
+#' \item{`lambda`}{`numeric(1)`\cr
+#' \eqn{\lambda} value used for the confidence bound.
+#' Defaults to `1.96`.}
+#' \item{`exponential_decay`}{`logical(1)`\cr
+#' Whether to use periodic exponential decay for \eqn{\lambda}.}
+#' \item{`rate`}{`numeric(1)`\cr
+#' Rate of the exponential decay.}
+#' \item{`period`}{`integer(1)`\cr
+#' Period of the exponential decay.}
+#' \item{`design_size`}{`integer(1)`\cr
+#' Size of the initial design.}
+#' \item{`initial_design`}{`data.table`\cr
+#' Initial design.}
+#' }
+#'
+#' @export
+TunerADBO = R6Class("TunerADBO",
+ inherit = mlr3tuning::TunerAsyncFromOptimizerAsync,
+ public = list(
+
+ #' @description
+ #' Creates a new instance of this [R6][R6::R6Class] class.
+ initialize = function() {
+ super$initialize(
+ optimizer = OptimizerADBO$new(),
+ man = "mlr3mbo::mlr_tuners_adbo"
+ )
+ }
+ )
+)
+
+mlr_tuners$add("adbo", TunerADBO)
+
+#' @include aaa.R
+tuners[["adbo"]] = TunerADBO
diff --git a/attic/test_OptimizerADBO.R b/attic/test_OptimizerADBO.R
new file mode 100644
index 00000000..1c91606e
--- /dev/null
+++ b/attic/test_OptimizerADBO.R
@@ -0,0 +1,15 @@
+# test_that("adbo optimizer works", {
+# skip_on_cran()
+# skip_if_not_installed("rush")
+# flush_redis()
+
+# rush::rush_plan(n_workers = 2)
+# instance = oi_async(
+# objective = OBJ_2D,
+# search_space = PS_2D,
+# terminator = trm("evals", n_evals = 100),
+# )
+# optimizer = opt("adbo", design_size = 4)
+# optimizer$optimize(instance)
+# })
+
diff --git a/attic/test_TunerADBO.R b/attic/test_TunerADBO.R
new file mode 100644
index 00000000..5d677bff
--- /dev/null
+++ b/attic/test_TunerADBO.R
@@ -0,0 +1,110 @@
+test_that("adbo tuner works", {
+ skip_on_cran()
+ skip_if_not_installed("rush")
+ flush_redis()
+
+ learner = lrn("classif.rpart",
+ minsplit = to_tune(2, 128),
+ cp = to_tune(1e-04, 1e-1))
+
+ rush::rush_plan(n_workers = 4)
+ instance = ti_async(
+ task = tsk("pima"),
+ learner = learner,
+ resampling = rsmp("cv", folds = 3),
+ measure = msr("classif.ce"),
+ terminator = trm("evals", n_evals = 20),
+ store_benchmark_result = FALSE
+ )
+
+ tuner = tnr("adbo", design_size = 4)
+ tuner$optimize(instance)
+
+ expect_data_table(instance$archive$data, min.rows = 20L)
+ expect_rush_reset(instance$rush)
+})
+
+test_that("adbo works with transformation functions", {
+ skip_on_cran()
+ skip_if_not_installed("rush")
+ flush_redis()
+
+ learner = lrn("classif.rpart",
+ minsplit = to_tune(2, 128, logscale = TRUE),
+ cp = to_tune(1e-04, 1e-1, logscale = TRUE))
+
+ rush::rush_plan(n_workers = 2)
+ instance = ti_async(
+ task = tsk("pima"),
+ learner = learner,
+ resampling = rsmp("cv", folds = 3),
+ measure = msr("classif.ce"),
+ terminator = trm("evals", n_evals = 20),
+ store_benchmark_result = FALSE
+ )
+
+ optimizer = tnr("adbo", design_size = 4)
+ optimizer$optimize(instance)
+
+ expect_data_table(instance$archive$data, min.rows = 20)
+ expect_rush_reset(instance$rush)
+})
+
+test_that("search works with dependencies", {
+ skip_on_cran()
+ skip_if_not_installed("rush")
+ flush_redis()
+
+ learner = lrn("classif.rpart",
+ minsplit = to_tune(p_int(2, 128, depends = keep_model == TRUE)),
+ cp = to_tune(1e-04, 1e-1),
+ keep_model = to_tune())
+
+ rush::rush_plan(n_workers = 2)
+ instance = ti_async(
+ task = tsk("pima"),
+ learner = learner,
+ resampling = rsmp("cv", folds = 3),
+ measure = msr("classif.ce"),
+ terminator = trm("evals", n_evals = 20),
+ store_benchmark_result = FALSE
+ )
+
+ optimizer = tnr("adbo", design_size = 4)
+ optimizer$optimize(instance)
+
+ expect_data_table(instance$archive$data, min.rows = 20)
+ expect_rush_reset(instance$rush)
+})
+
+test_that("adbo works with branching", {
+ skip_on_cran()
+ skip_if_not_installed("rush")
+ skip_if_not_installed("mlr3pipelines")
+ flush_redis()
+ library(mlr3pipelines)
+
+ graph_learner = as_learner(ppl("branch", graphs = list(rpart = lrn("classif.rpart", id = "rpart"), debug = lrn("classif.debug", id = "debug"))))
+ graph_learner$param_set$set_values(
+ "rpart.cp" = to_tune(p_dbl(1e-04, 1e-1, depends = branch.selection == "rpart")),
+ "rpart.minsplit" = to_tune(p_int(2, 128, depends = branch.selection == "rpart")),
+ "debug.x" = to_tune(p_dbl(0, 1, depends = branch.selection == "debug")),
+ "branch.selection" = to_tune(c("rpart", "debug"))
+ )
+
+ rush::rush_plan(n_workers = 2)
+ instance = ti_async(
+ task = tsk("pima"),
+ learner = graph_learner,
+ resampling = rsmp("cv", folds = 3),
+ measure = msr("classif.ce"),
+ terminator = trm("evals", n_evals = 20),
+ store_benchmark_result = FALSE
+ )
+
+ optimizer = tnr("adbo", design_size = 4)
+ optimizer$optimize(instance)
+
+ expect_data_table(instance$archive$data, min.rows = 20)
+ expect_rush_reset(instance$rush)
+})
diff --git a/man/SurrogateLearnerAsync.Rd b/man/SurrogateLearnerAsync.Rd
new file mode 100644
index 00000000..911f9cef
--- /dev/null
+++ b/man/SurrogateLearnerAsync.Rd
@@ -0,0 +1,104 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SurrogateLearnerAsync.R
+\name{SurrogateLearnerAsync}
+\alias{SurrogateLearnerAsync}
+\title{Surrogate Model Containing a Single Learner}
+\description{
+Surrogate model containing a single \link[mlr3:LearnerRegr]{mlr3::LearnerRegr}.
+}
+\section{Parameters}{
+
+\describe{
+\item{\code{assert_insample_perf}}{\code{logical(1)}\cr
+Should the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr} be asserted after updating the surrogate?
+If the assertion fails (i.e., the insample performance based on the \code{perf_measure} does not meet the
+\code{perf_threshold}), an error is thrown.
+Default is \code{FALSE}.
+}
+\item{\code{perf_measure}}{\link[mlr3:MeasureRegr]{mlr3::MeasureRegr}\cr
+Performance measure which should be used to assert the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr}.
+Only relevant if \code{assert_insample_perf = TRUE}.
+Default is \link[mlr3:mlr_measures_regr.rsq]{mlr3::mlr_measures_regr.rsq}.
+}
+\item{\code{perf_threshold}}{\code{numeric(1)}\cr
+Threshold the insample performance of the \link[mlr3:LearnerRegr]{mlr3::LearnerRegr} should be asserted against.
+Only relevant if \code{assert_insample_perf = TRUE}.
+Default is \code{0}.
+}
+\item{\code{catch_errors}}{\code{logical(1)}\cr
+Should errors during updating the surrogate be caught and propagated to the \code{loop_function} which can then handle
+the failed acquisition function optimization (as a result of the failed surrogate) appropriately by, e.g., proposing a randomly sampled point for evaluation?
+Default is \code{TRUE}.
+}
+\item{\code{impute_method}}{\code{character(1)}\cr
+Method used to impute the objective values of queued and running points when updating the surrogate.
+One of \code{c("mean", "random")}.
+Default is \code{"random"}.
+}
+}
+}
+
+\section{Super classes}{
+\code{\link[mlr3mbo:Surrogate]{mlr3mbo::Surrogate}} -> \code{\link[mlr3mbo:SurrogateLearner]{mlr3mbo::SurrogateLearner}} -> \code{SurrogateLearnerAsync}
+}
+\section{Methods}{
+\subsection{Public methods}{
+\itemize{
+\item \href{#method-SurrogateLearnerAsync-new}{\code{SurrogateLearnerAsync$new()}}
+\item \href{#method-SurrogateLearnerAsync-clone}{\code{SurrogateLearnerAsync$clone()}}
+}
+}
+\if{html}{\out{
+Inherited methods
+
+