diff --git a/NEWS.md b/NEWS.md index d89c16d5..c5a43846 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,6 +1,9 @@ # mlr3fselect 0.7.2.9000 * refactor: `FSelectorRFE` throws an error if the learner does not support the `$importance()` method. +* refactor: The `AutoFSelector` stores the instance and benchmark result if `store_models = TRUE`. +* refactor: The `AutoFSelector` stores the instance if `store_benchmark_result = TRUE`. +* feat: Add missing parameters from `AutoFSelector` to `auto_fselector()`. # mlr3fselect 0.7.2 diff --git a/R/AutoFSelector.R b/R/AutoFSelector.R index 1aaf0e03..63187d0e 100644 --- a/R/AutoFSelector.R +++ b/R/AutoFSelector.R @@ -1,24 +1,32 @@ #' @title AutoFSelector #' #' @description -#' The `AutoFSelector` is a [mlr3::Learner] which wraps another [mlr3::Learner] -#' and performs the following steps during `$train()`: +#' The [AutoFSelector] wraps a [mlr3::Learner] and augments it with automatic feature selection. +#' The [auto_fselector()] function creates an [AutoFSelector] object. #' -#' 1. The wrapped (inner) learner is trained on the feature subsets via -#' resampling. The feature selection can be specified by providing a -#' [FSelector], a [bbotk::Terminator], a [mlr3::Resampling] and a -#' [mlr3::Measure]. -#' 2. A final model is fit on the complete training data with the best found -#' feature subset. +#' @details +#' The [AutoFSelector] is a [mlr3::Learner] which wraps another [mlr3::Learner] and performs the following steps during `$train()`: #' -#' During `$predict()` the `AutoFSelector` just calls the predict method of the -#' wrapped (inner) learner. +#' 1. The wrapped (inner) learner is trained on the feature subsets via resampling. +#' The feature selection can be specified by providing a [FSelector], a [bbotk::Terminator], a [mlr3::Resampling] and a [mlr3::Measure]. +#' 2. A final model is fit on the complete training data with the best found feature subset. #' -#' Note that this approach allows to perform nested resampling by passing an -#' [AutoFSelector] object to [mlr3::resample()] or [mlr3::benchmark()]. -#' To access the inner resampling results, set `store_fselect_instance = TRUE` -#' and execute [mlr3::resample()] or [mlr3::benchmark()] with -#' `store_models = TRUE`. +#' During `$predict()` the [AutoFSelector] just calls the predict method of the wrapped (inner) learner. +#' +#' @section Resources: +#' * [book chapter](https://mlr3book.mlr-org.com/feature-selection.html#autofselect) on automatic feature selection. +#' +#' @section Nested Resampling: +#' Nested resampling can be performed by passing an [AutoFSelector] object to [mlr3::resample()] or [mlr3::benchmark()]. +#' To access the inner resampling results, set `store_fselect_instance = TRUE` and execute [mlr3::resample()] or [mlr3::benchmark()] with `store_models = TRUE` (see examples). +#' The [mlr3::Resampling] passed to the [AutoFSelector] is meant to be the inner resampling, operating on the training set of an arbitrary outer resampling. +#' For this reason it is not feasible to pass an instantiated [mlr3::Resampling] here.
+#' +#' @template param_learner +#' @template param_resampling +#' @template param_measure +#' @template param_terminator +#' @template param_store_fselect_instance #' #' @template param_store_models #' @template param_check_values @@ -26,80 +34,85 @@ #' #' @export #' @examples -#' library(mlr3) +#' # Automatic Feature Selection +#' +#' task = tsk("penguins") +#' train_set = sample(task$nrow, 0.8 * task$nrow) +#' test_set = setdiff(seq_len(task$nrow), train_set) #' -#' task = tsk("iris") -#' learner = lrn("classif.rpart") -#' resampling = rsmp("holdout") -#' measure = msr("classif.ce") +#' afs = auto_fselector( +#' method = fs("random_search"), +#' learner = lrn("classif.rpart"), +#' resampling = rsmp("holdout"), +#' measure = msr("classif.ce"), +#' term_evals = 4) #' -#' terminator = trm("evals", n_evals = 3) -#' fselector = fs("exhaustive_search") -#' afs = AutoFSelector$new(learner, resampling, measure, terminator, fselector, -#' store_fselect_instance = TRUE) +#' # optimize feature subset and fit final model +#' afs$train(task, row_ids = train_set) #' -#' afs$train(task) +#' # predict with final model +#' afs$predict(task, row_ids = test_set) +#' +#' # show fselect result +#' afs$fselect_result +#' +#' # model slot contains trained learner and fselect instance #' afs$model +#' +#' # shortcut trained learner #' afs$learner +#' +#' # shortcut fselect instance +#' afs$fselect_instance +#' +#' +#' # Nested Resampling +#' +#' afs = auto_fselector( +#' method = fs("random_search"), +#' learner = lrn("classif.rpart"), +#' resampling = rsmp("holdout"), +#' measure = msr("classif.ce"), +#' term_evals = 4) +#' +#' resampling_outer = rsmp("cv", folds = 3) +#' rr = resample(task, afs, resampling_outer, store_models = TRUE) +#' +#' # retrieve inner feature selection results +#' extract_inner_fselect_results(rr) +#' +#' # performance scores estimated on the outer resampling +#' rr$score() +#' +#' # unbiased performance of the final model trained on the full data set +#' rr$aggregate() AutoFSelector = R6Class("AutoFSelector", inherit = Learner, public = list( #' @field instance_args (`list()`)\cr - #' All arguments from construction to create the - #' [FSelectInstanceSingleCrit]. + #' All arguments from construction to create the [FSelectInstanceSingleCrit]. instance_args = NULL, #' @field fselector ([FSelector])\cr - #' Stores the feature selection algorithm. + #' Optimization algorithm. fselector = NULL, #' @description #' Creates a new instance of this [R6][R6::R6Class] class. #' - #' @param learner ([mlr3::Learner])\cr - #' Learner to optimize the feature subset for, see - #' [FSelectInstanceSingleCrit]. - #' - #' @param resampling ([mlr3::Resampling])\cr - #' Resampling strategy during feature selection, see - #' [FSelectInstanceSingleCrit]. This [mlr3::Resampling] is meant to be the - #' **inner** resampling, operating on the training set of an arbitrary outer - #' resampling. For this reason it is not feasible to pass an instantiated - #' [mlr3::Resampling] here. - #' - #' @param measure ([mlr3::Measure])\cr - #' Performance measure to optimize. - #' - #' @param terminator ([bbotk::Terminator])\cr - #' When to stop feature selection, see [FSelectInstanceSingleCrit]. - #' #' @param fselector ([FSelector])\cr - #' Feature selection algorithm to run. - #' - #' @param store_fselect_instance (`logical(1)`)\cr - #' If `TRUE` (default), stores the internally created - #' [FSelectInstanceSingleCrit] with all intermediate results in slot - #' `$fselect_instance`.
- initialize = function(learner, resampling, measure, terminator, fselector, - store_fselect_instance = TRUE, store_benchmark_result = TRUE, - store_models = FALSE, check_values = FALSE) { + #' Optimization algorithm. + initialize = function(learner, resampling, measure = NULL, terminator, fselector, store_fselect_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE) { ia = list() ia$learner = assert_learner(as_learner(learner, clone = TRUE)) - ia$resampling = assert_resampling(resampling, - instantiated = FALSE)$clone() - ia$measure = assert_measure(as_measure(measure), learner = learner) + ia$resampling = assert_resampling(resampling, instantiated = FALSE)$clone() + if (!is.null(measure)) ia$measure = assert_measure(as_measure(measure), learner = learner) ia$terminator = assert_terminator(terminator)$clone() - private$.store_fselect_instance = assert_flag(store_fselect_instance) - ia$store_benchmark_result = assert_flag(store_benchmark_result) - ia$store_models = assert_flag(store_models) - if (!private$.store_fselect_instance && ia$store_benchmark_result) { - stop("Benchmark results can only be stored if store_fselect_instance is set to TRUE") - } - if (ia$store_models && !ia$store_benchmark_result) { - stop("Models can only be stored if store_benchmark_result is set to TRUE") - } + ia$store_models = assert_flag(store_models) + ia$store_benchmark_result = assert_flag(store_benchmark_result) || ia$store_models + private$.store_fselect_instance = assert_flag(store_fselect_instance) || ia$store_benchmark_result ia$check_values = assert_flag(check_values) self$instance_args = ia @@ -111,7 +124,6 @@ AutoFSelector = R6Class("AutoFSelector", packages = c("mlr3fselect", learner$packages), feature_types = learner$feature_types, predict_types = learner$predict_types, - param_set = learner$param_set, properties = learner$properties ) @@ -120,52 +132,96 @@ AutoFSelector = R6Class("AutoFSelector", }, #' @description - #' Extracts the base learner from nested learner objects like - #' `GraphLearner` in \CRANpkg{mlr3pipelines}. If `recursive = 0`, the (tuned) - #' learner is returned. + #' Extracts the base learner from nested learner objects like `GraphLearner` in \CRANpkg{mlr3pipelines}. + #' If `recursive = 0`, the (tuned) learner is returned. #' #' @param recursive (`integer(1)`)\cr #' Depth of recursion for multiple nested objects. #' #' @return [Learner]. base_learner = function(recursive = Inf) { - if(recursive == 0) self$learner else self$learner$base_learner(recursive -1) - } - ), - - private = list( - - .train = function(task) { - - ia = self$instance_args - ia$task = task$clone() - instance = invoke(FSelectInstanceSingleCrit$new, .args = ia) - self$fselector$optimize(instance) - - feat = task$feature_names[as.logical(instance$result_x_search_space)] - ia$task$select(feat) + if (recursive == 0L) self$learner else self$learner$base_learner(recursive - 1L) + }, - learner = ia$learner$clone(deep = TRUE) - learner$train(ia$task) + #' @description + #' The importance scores of the final model. + #' + #' @return Named `numeric()`. 
+ importance = function() { + if ("importance" %nin% self$instance_args$learner$properties) { + stopf("Learner '%s' cannot calculate importance scores.", self$instance_args$learner$id) + } + if (is.null(self$model$learner$model)) { + self$instance_args$learner$importance() + } else { + self$model$learner$importance() + } + }, - result_model = list(learner = learner, features = feat) - if (isTRUE(private$.store_fselect_instance)) { - result_model$fselect_instance = instance + #' @description + #' The selected features of the final model. + #' These features are selected internally by the learner. + #' + #' @return `character()`. + selected_features = function() { + if ("selected_features" %nin% self$instance_args$learner$properties) { + stopf("Learner '%s' cannot select features.", self$instance_args$learner$id) + } + if (is.null(self$model$learner$model)) { + self$instance_args$learner$selected_features() + } else { + self$model$learner$selected_features() } - return(result_model) }, - .predict = function(task) { - task = task$clone(deep = TRUE) - task$select(self$model$features) - self$model$learner$predict(task) + #' @description + #' The out-of-bag error of the final model. + #' + #' @return `numeric(1)`. + oob_error = function() { + if ("oob_error" %nin% self$instance_args$learner$properties) { + stopf("Learner '%s' cannot calculate the out-of-bag error.", self$instance_args$learner$id) + } + if (is.null(self$model$learner$model)) { + self$instance_args$learner$oob_error() + } else { + self$model$learner$oob_error() + } }, - .base_learner = function(recursive = Inf) { - if (recursive == 0L) self$learner else self$learner$base_learner(recursive - 1L) + #' @description + #' The log-likelihood of the final model. + #' + #' @return `logLik`. + loglik = function() { + if ("loglik" %nin% self$instance_args$learner$properties) { + stopf("Learner '%s' cannot calculate the log-likelihood.", self$instance_args$learner$id) + } + if (is.null(self$model$learner$model)) { + self$instance_args$learner$loglik() + } else { + self$model$learner$loglik() + } }, - .store_fselect_instance = NULL + #' @description + #' Printer. + #' @param ... (ignored). + print = function() { + catf(format(self)) + catf(str_indent("* Model:", if (is.null(self$model)) "-" else class(self$model)[1L])) + catf(str_indent("* Packages:", self$packages)) + catf(str_indent("* Predict Type:", self$predict_type)) + catf(str_indent("* Feature Types:", self$feature_types)) + catf(str_indent("* Properties:", self$properties)) + w = self$warnings + e = self$errors + if (length(w)) { + catf(str_indent("* Warnings:", w)) + } + if (length(e)) { + catf(str_indent("* Errors:", e)) + } + } ), active = list( @@ -178,7 +234,7 @@ AutoFSelector = R6Class("AutoFSelector", #' Trained learner. learner = function() { # if there is no trained learner, we return the one in instance args - if (is.null(self$model)) { + if (is.null(self$model$learner$model)) { self$instance_args$learner } else { self$model$learner } }, #' @field fselect_instance ([FSelectInstanceSingleCrit])\cr - #' Internally created feature selection instance with all intermediate - #' results. + #' Internally created feature selection instance with all intermediate results. fselect_instance = function() self$model$fselect_instance, #' @field fselect_result ([data.table::data.table])\cr #' Short-cut to `$result` from [FSelectInstanceSingleCrit].
fselect_result = function() self$fselect_instance$result, + #' @field predict_type (`character(1)`)\cr + #' Stores the currently active predict type, e.g. `"response"`. + #' Must be an element of `$predict_types`. + predict_type = function(rhs) { + if (missing(rhs)) { + return(private$.predict_type) + } + if (rhs %nin% self$predict_types) { + stopf("Learner '%s' does not support predict type '%s'", self$id, rhs) + } + + # Catches 'Error: Field/Binding is read-only' bug + tryCatch({ + self$model$learner$predict_type = rhs + }, error = function(cond){}) + + private$.predict_type = rhs + }, + #' @field hash (`character(1)`)\cr #' Hash (unique identifier) for this object. hash = function(rhs) { @@ -201,5 +275,39 @@ AutoFSelector = R6Class("AutoFSelector", calculate_hash(class(self), self$id, self$param_set$values, private$.predict_type, self$fallback$hash, self$instance_args, private$.store_fselect_instance) } + ), + + private = list( + .train = function(task) { + # construct instance from args; then tune + ia = self$instance_args + ia$task = task$clone() + instance = invoke(FSelectInstanceSingleCrit$new, .args = ia) + self$fselector$optimize(instance) + learner = ia$learner$clone(deep = TRUE) + task = task$clone() + + # disable timeout to allow train on full data set without time limit + # timeout during tuning is not affected + learner$timeout = c(train = Inf, predict = Inf) + + # fit final model + feat = task$feature_names[as.logical(instance$result_x_search_space)] + task$select(feat) + learner$train(task) + + # the return model is a list of "learner", "features" and "fselect_instance" + result_model = list(learner = learner, features = feat) + if (private$.store_fselect_instance) result_model$fselect_instance = instance + result_model + }, + + .predict = function(task) { + task = task$clone(deep = TRUE) + task$select(self$model$features) + self$model$learner$predict(task) + }, + + .store_fselect_instance = NULL ) ) diff --git a/R/auto_fselector.R b/R/auto_fselector.R index ecbe7c61..49f01dfe 100644 --- a/R/auto_fselector.R +++ b/R/auto_fselector.R @@ -1,7 +1,9 @@ -#' @title Syntactic Sugar for Automatic Feature Selection +#' @title Function for Automatic Feature Selection #' -#' @description -#' Function to create an [AutoFSelector] object. +#' @inherit AutoFSelector description +#' @inheritSection AutoFSelector Resources +#' @inherit AutoFSelector details +#' @inheritSection AutoFSelector Nested Resampling #' #' @param method (`character(1)` | [FSelector])\cr #' Key to retrieve fselector from [mlr_fselectors] dictionary or [FSelector] object. @@ -12,11 +14,16 @@ #' @param ... (named `list()`)\cr #' Named arguments to be set as parameters of the fselector. #' -#' @return [AutoFSelector] +#' @return [AutoFSelector]. #' #' @template param_learner #' @template param_resampling #' @template param_measure +#' @template param_terminator +#' @template param_store_fselect_instance +#' @template param_store_benchmark_result +#' @template param_store_models +#' @template param_check_values #' #' @export #' @examples @@ -28,14 +35,14 @@ #' term_evals = 4) #' #' at$train(tsk("pima")) -auto_fselector = function(method, learner, resampling, measure, term_evals = NULL, term_time = NULL, ...) { +auto_fselector = function(method, learner, resampling, measure = NULL, term_evals = NULL, term_time = NULL, terminator = NULL, store_fselect_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE, check_values = FALSE, ...) 
{ fselector = if (is.character(method)) { assert_choice(method, mlr_fselectors$keys()) fs(method, ...) } else { assert_fselector(method) } - terminator = terminator_selection(term_evals, term_time) + terminator = terminator %??% terminator_selection(term_evals, term_time) - AutoFSelector$new(learner, resampling, measure, terminator, fselector) + AutoFSelector$new(learner = learner, resampling = resampling, measure = measure, terminator = terminator, fselector = fselector, store_fselect_instance = store_fselect_instance, store_benchmark_result = store_benchmark_result, store_models = store_models, check_values = check_values) } diff --git a/man-roxygen/param_learner.R b/man-roxygen/param_learner.R index c6e624ba..2703ebf8 100644 --- a/man-roxygen/param_learner.R +++ b/man-roxygen/param_learner.R @@ -1 +1,2 @@ -#' @param learner ([mlr3::Learner]). +#' @param learner ([mlr3::Learner])\cr +#' Learner to optimize the feature subset for. diff --git a/man-roxygen/param_measure.R b/man-roxygen/param_measure.R index c397e1bb..fcb0a2e4 100644 --- a/man-roxygen/param_measure.R +++ b/man-roxygen/param_measure.R @@ -1,2 +1,2 @@ #' @param measure ([mlr3::Measure])\cr -#' Measure to optimize. +#' Measure to optimize. If `NULL`, default measure is used. diff --git a/man-roxygen/param_resampling.R b/man-roxygen/param_resampling.R index 8f6bb2d6..e0bd515f 100644 --- a/man-roxygen/param_resampling.R +++ b/man-roxygen/param_resampling.R @@ -1,3 +1,4 @@ #' @param resampling ([mlr3::Resampling])\cr -#' Uninstantiated resamplings are instantiated during construction -#' so that all configurations are evaluated on the same data splits. +#' Resampling that is used to evaluate the performance of the feature subsets. +#' Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +#' Already instantiated resamplings are kept unchanged. diff --git a/man-roxygen/param_store_fselect_instance.R b/man-roxygen/param_store_fselect_instance.R new file mode 100644 index 00000000..a6a569e8 --- /dev/null +++ b/man-roxygen/param_store_fselect_instance.R @@ -0,0 +1,3 @@ +#' @param store_fselect_instance (`logical(1)`)\cr +#' If `TRUE` (default), stores the internally created [FSelectInstanceSingleCrit] with all intermediate results in slot `$fselect_instance`. +#' Is set to `TRUE` if `store_models = TRUE`. diff --git a/man-roxygen/param_terminator.R b/man-roxygen/param_terminator.R index 676fb1fb..66f298f1 100644 --- a/man-roxygen/param_terminator.R +++ b/man-roxygen/param_terminator.R @@ -1 +1,2 @@ -#' @param terminator ([bbotk::Terminator]). +#' @param terminator ([Terminator])\cr +#' Stop criterion of the feature selection. diff --git a/man/AutoFSelector.Rd b/man/AutoFSelector.Rd index a9c2ea0e..ed1cf498 100644 --- a/man/AutoFSelector.Rd +++ b/man/AutoFSelector.Rd @@ -4,42 +4,87 @@ \alias{AutoFSelector} \title{AutoFSelector} \description{ -The \code{AutoFSelector} is a \link[mlr3:Learner]{mlr3::Learner} which wraps another \link[mlr3:Learner]{mlr3::Learner} -and performs the following steps during \verb{$train()}: +The \link{AutoFSelector} wraps a \link[mlr3:Learner]{mlr3::Learner} and augments it with automatic feature selection. +The \code{\link[=auto_fselector]{auto_fselector()}} function creates an \link{AutoFSelector} object.
+} +\details{ +The \link{AutoFSelector} is a \link[mlr3:Learner]{mlr3::Learner} which wraps another \link[mlr3:Learner]{mlr3::Learner} and performs the following steps during \verb{$train()}: \enumerate{ -\item The wrapped (inner) learner is trained on the feature subsets via -resampling. The feature selection can be specified by providing a -\link{FSelector}, a \link[bbotk:Terminator]{bbotk::Terminator}, a \link[mlr3:Resampling]{mlr3::Resampling} and a -\link[mlr3:Measure]{mlr3::Measure}. -\item A final model is fit on the complete training data with the best found -feature subset. +\item The wrapped (inner) learner is trained on the feature subsets via resampling. +The feature selection can be specified by providing a \link{FSelector}, a \link[bbotk:Terminator]{bbotk::Terminator}, a \link[mlr3:Resampling]{mlr3::Resampling} and a \link[mlr3:Measure]{mlr3::Measure}. +\item A final model is fit on the complete training data with the best found feature subset. +} + +During \verb{$predict()} the \link{AutoFSelector} just calls the predict method of the wrapped (inner) learner. +} +\section{Resources}{ + +\itemize{ +\item \href{https://mlr3book.mlr-org.com/feature-selection.html#autofselect}{book chapter} on automatic feature selection. +} } -During \verb{$predict()} the \code{AutoFSelector} just calls the predict method of the -wrapped (inner) learner. +\section{Nested Resampling}{ -Note that this approach allows to perform nested resampling by passing an -\link{AutoFSelector} object to \code{\link[mlr3:resample]{mlr3::resample()}} or \code{\link[mlr3:benchmark]{mlr3::benchmark()}}. -To access the inner resampling results, set \code{store_fselect_instance = TRUE} -and execute \code{\link[mlr3:resample]{mlr3::resample()}} or \code{\link[mlr3:benchmark]{mlr3::benchmark()}} with -\code{store_models = TRUE}. +Nested resampling can be performed by passing an \link{AutoFSelector} object to \code{\link[mlr3:resample]{mlr3::resample()}} or \code{\link[mlr3:benchmark]{mlr3::benchmark()}}. +To access the inner resampling results, set \code{store_fselect_instance = TRUE} and execute \code{\link[mlr3:resample]{mlr3::resample()}} or \code{\link[mlr3:benchmark]{mlr3::benchmark()}} with \code{store_models = TRUE} (see examples). +The \link[mlr3:Resampling]{mlr3::Resampling} passed to the \link{AutoFSelector} is meant to be the inner resampling, operating on the training set of an arbitrary outer resampling. +For this reason it is not feasible to pass an instantiated \link[mlr3:Resampling]{mlr3::Resampling} here. 
} + \examples{ -library(mlr3) +# Automatic Feature Selection -task = tsk("iris") -learner = lrn("classif.rpart") -resampling = rsmp("holdout") -measure = msr("classif.ce") +task = tsk("penguins") +train_set = sample(task$nrow, 0.8 * task$nrow) +test_set = setdiff(seq_len(task$nrow), train_set) -terminator = trm("evals", n_evals = 3) -fselector = fs("exhaustive_search") -afs = AutoFSelector$new(learner, resampling, measure, terminator, fselector, - store_fselect_instance = TRUE) +afs = auto_fselector( + method = fs("random_search"), + learner = lrn("classif.rpart"), + resampling = rsmp("holdout"), + measure = msr("classif.ce"), + term_evals = 4) -afs$train(task) +# optimize feature subset and fit final model +afs$train(task, row_ids = train_set) + +# predict with final model +afs$predict(task, row_ids = test_set) + +# show fselect result +afs$fselect_result + +# model slot contains trained learner and fselect instance afs$model + +# shortcut trained learner afs$learner + +# shortcut fselect instance +afs$fselect_instance + + +# Nested Resampling + +afs = auto_fselector( + method = fs("random_search"), + learner = lrn("classif.rpart"), + resampling = rsmp("holdout"), + measure = msr("classif.ce"), + term_evals = 4) + +resampling_outer = rsmp("cv", folds = 3) +rr = resample(task, afs, resampling_outer, store_models = TRUE) + +# retrieve inner feature selection results +extract_inner_fselect_results(rr) + +# performance scores estimated on the outer resampling +rr$score() + +# unbiased performance of the final model trained on the full data set +rr$aggregate() } \section{Super class}{ \code{\link[mlr3:Learner]{mlr3::Learner}} -> \code{AutoFSelector} @@ -48,11 +93,10 @@ afs$learner \if{html}{\out{
}} \describe{ \item{\code{instance_args}}{(\code{list()})\cr -All arguments from construction to create the -\link{FSelectInstanceSingleCrit}.} +All arguments from construction to create the \link{FSelectInstanceSingleCrit}.} \item{\code{fselector}}{(\link{FSelector})\cr -Stores the feature selection algorithm.} +Optimization algorithm.} } \if{html}{\out{
}} } @@ -66,12 +110,15 @@ Returns \link{FSelectInstanceSingleCrit} archive.} Trained learner.} \item{\code{fselect_instance}}{(\link{FSelectInstanceSingleCrit})\cr -Internally created feature selection instance with all intermediate -results.} +Internally created feature selection instance with all intermediate results.} \item{\code{fselect_result}}{(\link[data.table:data.table]{data.table::data.table})\cr Short-cut to \verb{$result} from \link{FSelectInstanceSingleCrit}.} +\item{\code{predict_type}}{(\code{character(1)})\cr +Stores the currently active predict type, e.g. \code{"response"}. +Must be an element of \verb{$predict_types}.} + \item{\code{hash}}{(\code{character(1)})\cr Hash (unique identifier) for this object.} } @@ -82,6 +129,11 @@ Hash (unique identifier) for this object.} \itemize{ \item \href{#method-AutoFSelector-new}{\code{AutoFSelector$new()}} \item \href{#method-AutoFSelector-base_learner}{\code{AutoFSelector$base_learner()}} +\item \href{#method-AutoFSelector-importance}{\code{AutoFSelector$importance()}} +\item \href{#method-AutoFSelector-selected_features}{\code{AutoFSelector$selected_features()}} +\item \href{#method-AutoFSelector-oob_error}{\code{AutoFSelector$oob_error()}} +\item \href{#method-AutoFSelector-loglik}{\code{AutoFSelector$loglik()}} +\item \href{#method-AutoFSelector-print}{\code{AutoFSelector$print()}} \item \href{#method-AutoFSelector-clone}{\code{AutoFSelector$clone()}} } } @@ -92,7 +144,6 @@ Hash (unique identifier) for this object.}
  • mlr3::Learner$help()
  • mlr3::Learner$predict()
  • mlr3::Learner$predict_newdata()
  • - mlr3::Learner$print()
  • mlr3::Learner$reset()
  • mlr3::Learner$train()
  • @@ -107,7 +158,7 @@ Creates a new instance of this \link[R6:R6Class]{R6} class. \if{html}{\out{
    }}\preformatted{AutoFSelector$new( learner, resampling, - measure, + measure = NULL, terminator, fselector, store_fselect_instance = TRUE, @@ -121,29 +172,25 @@ Creates a new instance of this \link[R6:R6Class]{R6} class. \if{html}{\out{
}} \describe{ \item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner})\cr -Learner to optimize the feature subset for, see -\link{FSelectInstanceSingleCrit}.} +Learner to optimize the feature subset for.} \item{\code{resampling}}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr -Resampling strategy during feature selection, see -\link{FSelectInstanceSingleCrit}. This \link[mlr3:Resampling]{mlr3::Resampling} is meant to be the -\strong{inner} resampling, operating on the training set of an arbitrary outer -resampling. For this reason it is not feasible to pass an instantiated -\link[mlr3:Resampling]{mlr3::Resampling} here.} +Resampling that is used to evaluate the performance of the feature subsets. +Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +Already instantiated resamplings are kept unchanged.} \item{\code{measure}}{(\link[mlr3:Measure]{mlr3::Measure})\cr -Performance measure to optimize.} +Measure to optimize. If \code{NULL}, default measure is used.} -\item{\code{terminator}}{(\link[bbotk:Terminator]{bbotk::Terminator})\cr -When to stop feature selection, see \link{FSelectInstanceSingleCrit}.} +\item{\code{terminator}}{(\link{Terminator})\cr +Stop criterion of the feature selection.} \item{\code{fselector}}{(\link{FSelector})\cr -Feature selection algorithm to run.} +Optimization algorithm.} \item{\code{store_fselect_instance}}{(\code{logical(1)})\cr -If \code{TRUE} (default), stores the internally created -\link{FSelectInstanceSingleCrit} with all intermediate results in slot -\verb{$fselect_instance}.} +If \code{TRUE} (default), stores the internally created \link{FSelectInstanceSingleCrit} with all intermediate results in slot \verb{$fselect_instance}. +Is set to \code{TRUE} if \code{store_models = TRUE}.} \item{\code{store_benchmark_result}}{(\code{logical(1)})\cr Store benchmark result in archive?} @@ -162,9 +209,8 @@ validity?} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-AutoFSelector-base_learner}{}}} \subsection{Method \code{base_learner()}}{ -Extracts the base learner from nested learner objects like -\code{GraphLearner} in \CRANpkg{mlr3pipelines}. If \code{recursive = 0}, the (tuned) -learner is returned. +Extracts the base learner from nested learner objects like \code{GraphLearner} in \CRANpkg{mlr3pipelines}. +If \code{recursive = 0}, the (tuned) learner is returned. \subsection{Usage}{ \if{html}{\out{
    }}\preformatted{AutoFSelector$base_learner(recursive = Inf)}\if{html}{\out{
    }} } @@ -182,6 +228,76 @@ Depth of recursion for multiple nested objects.} } } \if{html}{\out{
    }} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AutoFSelector-importance}{}}} +\subsection{Method \code{importance()}}{ +The importance scores of the final model. +\subsection{Usage}{ +\if{html}{\out{
    }}\preformatted{AutoFSelector$importance()}\if{html}{\out{
    }} +} + +\subsection{Returns}{ +Named \code{numeric()}. +} +} +\if{html}{\out{
    }} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AutoFSelector-selected_features}{}}} +\subsection{Method \code{selected_features()}}{ +The selected features of the final model. +These features are selected internally by the learner. +\subsection{Usage}{ +\if{html}{\out{
    }}\preformatted{AutoFSelector$selected_features()}\if{html}{\out{
    }} +} + +\subsection{Returns}{ +\code{character()}. +} +} +\if{html}{\out{
    }} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AutoFSelector-oob_error}{}}} +\subsection{Method \code{oob_error()}}{ +The out-of-bag error of the final model. +\subsection{Usage}{ +\if{html}{\out{
    }}\preformatted{AutoFSelector$oob_error()}\if{html}{\out{
    }} +} + +\subsection{Returns}{ +\code{numeric(1)}. +} +} +\if{html}{\out{
    }} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AutoFSelector-loglik}{}}} +\subsection{Method \code{loglik()}}{ +The log-likelihood of the final model. +\subsection{Usage}{ +\if{html}{\out{
    }}\preformatted{AutoFSelector$loglik()}\if{html}{\out{
}} +} + +\subsection{Returns}{ +\code{logLik}. +} +} +\if{html}{\out{
}} +\if{html}{\out{}} +\if{latex}{\out{\hypertarget{method-AutoFSelector-print}{}}} +\subsection{Method \code{print()}}{ +Printer. +\subsection{Usage}{ +\if{html}{\out{
    }}\preformatted{AutoFSelector$print()}\if{html}{\out{
    }} +} + +\subsection{Arguments}{ +\if{html}{\out{
    }} +\describe{ +\item{\code{...}}{(ignored).} +} +\if{html}{\out{
    }} +} +} +\if{html}{\out{
}} \if{html}{\out{}} \if{latex}{\out{\hypertarget{method-AutoFSelector-clone}{}}} \subsection{Method \code{clone()}}{ diff --git a/man/FSelectInstanceMultiCrit.Rd b/man/FSelectInstanceMultiCrit.Rd index b762693c..084c8f83 100644 --- a/man/FSelectInstanceMultiCrit.Rd +++ b/man/FSelectInstanceMultiCrit.Rd @@ -108,17 +108,20 @@ Creates a new instance of this \link[R6:R6Class]{R6} class. \item{\code{task}}{(\link[mlr3:Task]{mlr3::Task})\cr Task to operate on.} -\item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner}).} +\item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner})\cr +Learner to optimize the feature subset for.} \item{\code{resampling}}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr -Uninstantiated resamplings are instantiated during construction -so that all configurations are evaluated on the same data splits.} +Resampling that is used to evaluate the performance of the feature subsets. +Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +Already instantiated resamplings are kept unchanged.} \item{\code{measures}}{(list of \link[mlr3:Measure]{mlr3::Measure})\cr Measures to optimize. If \code{NULL}, \CRANpkg{mlr3}'s default measure is used.} -\item{\code{terminator}}{(\link[bbotk:Terminator]{bbotk::Terminator}).} +\item{\code{terminator}}{(\link{Terminator})\cr +Stop criterion of the feature selection.} \item{\code{store_models}}{(\code{logical(1)}). Store models in benchmark result?} diff --git a/man/FSelectInstanceSingleCrit.Rd b/man/FSelectInstanceSingleCrit.Rd index a04406c1..eac4953c 100644 --- a/man/FSelectInstanceSingleCrit.Rd +++ b/man/FSelectInstanceSingleCrit.Rd @@ -109,16 +109,19 @@ Creates a new instance of this \link[R6:R6Class]{R6} class. \item{\code{task}}{(\link[mlr3:Task]{mlr3::Task})\cr Task to operate on.} -\item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner}).} +\item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner})\cr +Learner to optimize the feature subset for.} \item{\code{resampling}}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr -Uninstantiated resamplings are instantiated during construction -so that all configurations are evaluated on the same data splits.} +Resampling that is used to evaluate the performance of the feature subsets. +Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +Already instantiated resamplings are kept unchanged.} \item{\code{measure}}{(\link[mlr3:Measure]{mlr3::Measure})\cr -Measure to optimize.} +Measure to optimize. If \code{NULL}, default measure is used.} -\item{\code{terminator}}{(\link[bbotk:Terminator]{bbotk::Terminator}).} +\item{\code{terminator}}{(\link{Terminator})\cr +Stop criterion of the feature selection.} \item{\code{store_models}}{(\code{logical(1)}). Store models in benchmark result?} diff --git a/man/ObjectiveFSelect.Rd b/man/ObjectiveFSelect.Rd index 436e8047..f3a82f5e 100644 --- a/man/ObjectiveFSelect.Rd +++ b/man/ObjectiveFSelect.Rd @@ -74,11 +74,13 @@ Creates a new instance of this \link[R6:R6Class]{R6} class.
\item{\code{task}}{(\link[mlr3:Task]{mlr3::Task})\cr Task to operate on.} -\item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner}).} +\item{\code{learner}}{(\link[mlr3:Learner]{mlr3::Learner})\cr +Learner to optimize the feature subset for.} \item{\code{resampling}}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr -Uninstantiated resamplings are instantiated during construction -so that all configurations are evaluated on the same data splits.} +Resampling that is used to evaluate the performance of the feature subsets. +Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +Already instantiated resamplings are kept unchanged.} \item{\code{measures}}{(list of \link[mlr3:Measure]{mlr3::Measure})\cr Measures to optimize. diff --git a/man/auto_fselector.Rd b/man/auto_fselector.Rd index 891b34f5..b9f1be61 100644 --- a/man/auto_fselector.Rd +++ b/man/auto_fselector.Rd @@ -2,15 +2,20 @@ % Please edit documentation in R/auto_fselector.R \name{auto_fselector} \alias{auto_fselector} -\title{Syntactic Sugar for Automatic Feature Selection} +\title{Function for Automatic Feature Selection} \usage{ auto_fselector( method, learner, resampling, - measure, + measure = NULL, term_evals = NULL, term_time = NULL, + terminator = NULL, + store_fselect_instance = TRUE, + store_benchmark_result = TRUE, + store_models = FALSE, + check_values = FALSE, ... ) } @@ -18,14 +23,16 @@ auto_fselector( \item{method}{(\code{character(1)} | \link{FSelector})\cr Key to retrieve fselector from \link{mlr_fselectors} dictionary or \link{FSelector} object.} -\item{learner}{(\link[mlr3:Learner]{mlr3::Learner}).} +\item{learner}{(\link[mlr3:Learner]{mlr3::Learner})\cr +Learner to optimize the feature subset for.} \item{resampling}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr -Uninstantiated resamplings are instantiated during construction -so that all configurations are evaluated on the same data splits.} +Resampling that is used to evaluate the performance of the feature subsets. +Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +Already instantiated resamplings are kept unchanged.} \item{measure}{(\link[mlr3:Measure]{mlr3::Measure})\cr -Measure to optimize.} +Measure to optimize. If \code{NULL}, default measure is used.} \item{term_evals}{(\code{integer(1)})\cr Number of allowed evaluations.} @@ -33,15 +40,58 @@ Number of allowed evaluations.} \item{term_time}{(\code{integer(1)})\cr Maximum allowed time in seconds.} +\item{terminator}{(\link{Terminator})\cr +Stop criterion of the feature selection.} + +\item{store_fselect_instance}{(\code{logical(1)})\cr +If \code{TRUE} (default), stores the internally created \link{FSelectInstanceSingleCrit} with all intermediate results in slot \verb{$fselect_instance}. +Is set to \code{TRUE} if \code{store_models = TRUE}.} + +\item{store_benchmark_result}{(\code{logical(1)})\cr +Store benchmark result in archive?} + +\item{store_models}{(\code{logical(1)}). +Store models in benchmark result?} + +\item{check_values}{(\code{logical(1)})\cr +Check the parameters before the evaluation and the results for +validity?} + \item{...}{(named \code{list()})\cr Named arguments to be set as parameters of the fselector.} } \value{ -\link{AutoFSelector} +\link{AutoFSelector}. } \description{ -Function to create an \link{AutoFSelector} object.
+The \link{AutoFSelector} wraps a \link[mlr3:Learner]{mlr3::Learner} and augments it with automatic feature selection. +The \code{\link[=auto_fselector]{auto_fselector()}} function creates an \link{AutoFSelector} object. +} +\details{ +The \link{AutoFSelector} is a \link[mlr3:Learner]{mlr3::Learner} which wraps another \link[mlr3:Learner]{mlr3::Learner} and performs the following steps during \verb{$train()}: +\enumerate{ +\item The wrapped (inner) learner is trained on the feature subsets via resampling. +The feature selection can be specified by providing a \link{FSelector}, a \link[bbotk:Terminator]{bbotk::Terminator}, a \link[mlr3:Resampling]{mlr3::Resampling} and a \link[mlr3:Measure]{mlr3::Measure}. +\item A final model is fit on the complete training data with the best found feature subset. +} + +During \verb{$predict()} the \link{AutoFSelector} just calls the predict method of the wrapped (inner) learner. } +\section{Resources}{ + +\itemize{ +\item \href{https://mlr3book.mlr-org.com/feature-selection.html#autofselect}{book chapter} on automatic feature selection. +} +} + +\section{Nested Resampling}{ + +Nested resampling can be performed by passing an \link{AutoFSelector} object to \code{\link[mlr3:resample]{mlr3::resample()}} or \code{\link[mlr3:benchmark]{mlr3::benchmark()}}. +To access the inner resampling results, set \code{store_fselect_instance = TRUE} and execute \code{\link[mlr3:resample]{mlr3::resample()}} or \code{\link[mlr3:benchmark]{mlr3::benchmark()}} with \code{store_models = TRUE} (see examples). +The \link[mlr3:Resampling]{mlr3::Resampling} passed to the \link{AutoFSelector} is meant to be the inner resampling, operating on the training set of an arbitrary outer resampling. +For this reason it is not feasible to pass an instantiated \link[mlr3:Resampling]{mlr3::Resampling} here. +} + \examples{ at = auto_fselector( method = "random_search", diff --git a/man/fselect.Rd b/man/fselect.Rd index 1db7d022..f76b9142 100644 --- a/man/fselect.Rd +++ b/man/fselect.Rd @@ -23,11 +23,13 @@ Key to retrieve fselector from \link{mlr_fselectors} dictionary or \link{FSelect \item{task}{(\link[mlr3:Task]{mlr3::Task})\cr Task to operate on.} -\item{learner}{(\link[mlr3:Learner]{mlr3::Learner}).} +\item{learner}{(\link[mlr3:Learner]{mlr3::Learner})\cr +Learner to optimize the feature subset for.} \item{resampling}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr -Uninstantiated resamplings are instantiated during construction -so that all configurations are evaluated on the same data splits.} +Resampling that is used to evaluate the performance of the feature subsets. +Uninstantiated resamplings are instantiated during construction so that all feature subsets are evaluated on the same data splits. +Already instantiated resamplings are kept unchanged.} \item{measures}{(list of \link[mlr3:Measure]{mlr3::Measure})\cr Measures to optimize.
diff --git a/man/fselect_nested.Rd b/man/fselect_nested.Rd index ca686dd4..1aa56610 100644 --- a/man/fselect_nested.Rd +++ b/man/fselect_nested.Rd @@ -23,7 +23,8 @@ Key to retrieve fselector from \link{mlr_fselectors} dictionary.} \item{task}{(\link[mlr3:Task]{mlr3::Task})\cr Task to operate on.} -\item{learner}{(\link[mlr3:Learner]{mlr3::Learner}).} +\item{learner}{(\link[mlr3:Learner]{mlr3::Learner})\cr +Learner to optimize the feature subset for.} \item{inner_resampling}{(\link[mlr3:Resampling]{mlr3::Resampling})\cr Resampling used for the inner loop.} @@ -32,7 +33,7 @@ Resampling used for the inner loop.} Resampling used for the outer loop.} \item{measure}{(\link[mlr3:Measure]{mlr3::Measure})\cr -Measure to optimize.} +Measure to optimize. If \code{NULL}, default measure is used.} \item{term_evals}{(\code{integer(1)})\cr Number of allowed evaluations.} diff --git a/tests/testthat/test_AutoFSelector.R b/tests/testthat/test_AutoFSelector.R index 9e0d3bf9..4bdc66bc 100644 --- a/tests/testthat/test_AutoFSelector.R +++ b/tests/testthat/test_AutoFSelector.R @@ -64,7 +64,7 @@ test_that("nested resampling works", { test_that("store_fselect_instance, store_benchmark_result and store_models flags work", { skip_on_cran() - te = trm("evals", n_evals = 10) + te = trm("evals", n_evals = 2) task = tsk("iris") ms = msr("classif.ce") fselector = fs("random_search") @@ -74,51 +74,51 @@ test_that("store_fselect_instance, store_benchmark_result and store_models flags store_models = TRUE) at$train(task) - assert_r6(at$fselect_instance, "FSelectInstanceSingleCrit") - assert_benchmark_result(at$fselect_instance$archive$benchmark_result) - assert_class(at$fselect_instance$archive$benchmark_result$resample_result(1)$learners[[1]]$model$classif.rpart$model, "rpart") + expect_r6(at$fselect_instance, "FSelectInstanceSingleCrit") + expect_benchmark_result(at$fselect_instance$archive$benchmark_result) + expect_class(at$fselect_instance$archive$benchmark_result$resample_result(1)$learners[[1]]$model$classif.rpart$model, "rpart") at = AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, fselector = fselector, store_fselect_instance = TRUE, store_benchmark_result = TRUE, store_models = FALSE) at$train(task) - assert_r6(at$fselect_instance, "FSelectInstanceSingleCrit") - assert_benchmark_result(at$fselect_instance$archive$benchmark_result) - assert_null(at$fselect_instance$archive$benchmark_result$resample_result(1)$learners[[1]]$model) + expect_r6(at$fselect_instance, "FSelectInstanceSingleCrit") + expect_benchmark_result(at$fselect_instance$archive$benchmark_result) + expect_null(at$fselect_instance$archive$benchmark_result$resample_result(1)$learners[[1]]$model) at = AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, fselector = fselector, store_fselect_instance = TRUE, store_benchmark_result = FALSE, store_models = FALSE) at$train(task) - assert_r6(at$fselect_instance, "FSelectInstanceSingleCrit") - assert_null(at$fselect_instance$archive$benchmark_result) + expect_r6(at$fselect_instance, "FSelectInstanceSingleCrit") + expect_null(at$fselect_instance$archive$benchmark_result) at = AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, fselector = fselector, store_fselect_instance = FALSE, store_benchmark_result = FALSE, store_models = FALSE) at$train(task) - assert_null(at$fselect_instance) + expect_null(at$fselect_instance) - expect_error(AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, - fselector = fselector, store_fselect_instance = FALSE, 
store_benchmark_result = TRUE, - store_models = FALSE), - regexp = "Benchmark results can only be stored if store_fselect_instance is set to TRUE", - fixed = TRUE) + at = AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, + fselector = fselector, store_fselect_instance = FALSE, store_benchmark_result = FALSE, + store_models = TRUE) + at$train(task) - expect_error(AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, - fselector = fselector, store_fselect_instance = TRUE, store_benchmark_result = FALSE, - store_models = TRUE), - regexp = "Models can only be stored if store_benchmark_result is set to TRUE", - fixed = TRUE) + expect_r6(at$fselect_instance, "FSelectInstanceSingleCrit") + expect_benchmark_result(at$fselect_instance$archive$benchmark_result) + expect_class(at$fselect_instance$archive$benchmark_result$resample_result(1)$learners[[1]]$model$classif.rpart$model, "rpart") - expect_error(AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, - fselector = fselector, store_fselect_instance = FALSE, store_benchmark_result = FALSE, - store_models = TRUE), - regexp = "Models can only be stored if store_benchmark_result is set to TRUE", - fixed = TRUE) + at = AutoFSelector$new(lrn("classif.rpart"), rsmp("holdout"), ms, te, + fselector = fselector, store_fselect_instance = FALSE, store_benchmark_result = TRUE, + store_models = FALSE) + at$train(task) + + expect_r6(at$fselect_instance, "FSelectInstanceSingleCrit") + expect_benchmark_result(at$fselect_instance$archive$benchmark_result) + expect_null(at$fselect_instance$archive$benchmark_result$resample_result(1)$learners[[1]]$model) }) test_that("AutoFSelector works with GraphLearner", { @@ -184,18 +184,18 @@ test_that("AutoFSelector get_base_learner method works", { test_that("AutoFSelector hash works #647 in mlr3", { afs_1 = AutoFSelector$new( - learner = lrn("classif.rpart"), - resampling = rsmp("holdout"), - measure = msr("classif.ce"), - terminator = trm("evals", n_evals = 4), + learner = lrn("classif.rpart"), + resampling = rsmp("holdout"), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 4), fselector = fs("random_search"), store_benchmark_result = FALSE) afs_2 = AutoFSelector$new( - learner = lrn("classif.rpart"), - resampling = rsmp("holdout"), - measure = msr("classif.ce"), - terminator = trm("evals", n_evals = 4), + learner = lrn("classif.rpart"), + resampling = rsmp("holdout"), + measure = msr("classif.ce"), + terminator = trm("evals", n_evals = 4), fselector = fs("random_search"), store_benchmark_result = TRUE) diff --git a/tests/testthat/test_FSelectorRFE.R b/tests/testthat/test_FSelectorRFE.R index 8933adfc..44206e29 100644 --- a/tests/testthat/test_FSelectorRFE.R +++ b/tests/testthat/test_FSelectorRFE.R @@ -83,7 +83,7 @@ test_that("learner without importance method throw an error", { task = tsk("pima"), learner = learner, resampling = rsmp("holdout"), - measure = msr("classif.ce"), + measures = msr("classif.ce"), store_models = TRUE ), "does not work with") })