From 2fa586827cb0829c1401700d170e5c80b96d92eb Mon Sep 17 00:00:00 2001
From: "C. Regouby"
Date: Sat, 20 Jul 2024 15:03:36 +0200
Subject: [PATCH] Add missing Node help. Fix some help typos.

---
 DESCRIPTION            | 2 +-
 R/hardhat.R            | 4 ++++
 R/model.R              | 2 +-
 R/tab-network.R        | 2 +-
 man/tabnet_config.Rd   | 2 +-
 man/tabnet_fit.Rd      | 2 ++
 man/tabnet_nn.Rd       | 2 +-
 man/tabnet_pretrain.Rd | 2 ++
 tests/spelling.R       | 3 +++
 9 files changed, 16 insertions(+), 5 deletions(-)
 create mode 100644 tests/spelling.R

diff --git a/DESCRIPTION b/DESCRIPTION
index ed33b75..fcf265c 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -66,5 +66,5 @@ Config/testthat/parallel: false
 Config/testthat/start-first: interface, explain, params
 Encoding: UTF-8
 Roxygen: list(markdown = TRUE)
-RoxygenNote: 7.3.1
+RoxygenNote: 7.3.2
 Language: en-US
diff --git a/R/hardhat.R b/R/hardhat.R
index 2398f4a..5f68d5c 100644
--- a/R/hardhat.R
+++ b/R/hardhat.R
@@ -8,6 +8,8 @@
 #' * A __matrix__ of predictors.
 #' * A __recipe__ specifying a set of preprocessing steps
 #'   created from [recipes::recipe()].
+#' * A __Node__ object, where the tree will be used as the hierarchical outcome
+#'   and the node attributes as predictors.
 #'
 #' The predictor data should be standardized (e.g. centered or scaled).
 #' The model treats categorical predictors internally thus, you don't need to
@@ -244,6 +246,8 @@ new_tabnet_fit <- function(fit, blueprint) {
 #' * A __matrix__ of predictors.
 #' * A __recipe__ specifying a set of preprocessing steps
 #'   created from [recipes::recipe()].
+#' * A __Node__ object, where the tree will be used as the hierarchical outcome
+#'   and the node attributes as predictors.
 #'
 #' The predictor data should be standardized (e.g. centered or scaled).
 #' The model treats categorical predictors internally thus, you don't need to
diff --git a/R/model.R b/R/model.R
index df8c76e..a027c0f 100644
--- a/R/model.R
+++ b/R/model.R
@@ -83,7 +83,7 @@ resolve_data <- function(x, y) {
 #' @param learn_rate initial learning rate for the optimizer.
 #' @param optimizer the optimization method. currently only `"adam"` is supported,
 #'   you can also pass any torch optimizer function.
-#' @param valid_split (`[0, 1)`) The fraction of the dataset used for validation.
+#' @param valid_split In \[0, 1). The fraction of the dataset used for validation.
 #'   (default = 0 means no split)
 #' @param num_independent Number of independent Gated Linear Units layers at each step of the encoder.
 #'   Usual values range from 1 to 5.
diff --git a/R/tab-network.R b/R/tab-network.R
index aee38d0..15b63b7 100644
--- a/R/tab-network.R
+++ b/R/tab-network.R
@@ -418,7 +418,7 @@ tabnet_no_embedding <- torch::nn_module(
 #' @param n_shared Number of shared GLU layer in each GLU block of the encoder.
 #' @param epsilon Avoid log(0), this should be kept very low.
 #' @param virtual_batch_size Batch size for Ghost Batch Normalization.
-#' @param momentum Float value between 0 and 1 which will be used for momentum in all batch norm.
+#' @param momentum Numeric value between 0 and 1, used for momentum in all batch norm.
 #' @param mask_type Either "sparsemax" or "entmax" : this is the masking function to use.
 #' @export
 tabnet_nn <- torch::nn_module(
diff --git a/man/tabnet_config.Rd b/man/tabnet_config.Rd
index e92b5bc..054f44d 100644
--- a/man/tabnet_config.Rd
+++ b/man/tabnet_config.Rd
@@ -81,7 +81,7 @@ block, either \code{"sparsemax"} or \code{"entmax"}.Defaults to \code{"sparsemax
 \item{virtual_batch_size}{(int) Size of the mini batches used for
 "Ghost Batch Normalization" (default=256^2)}
 
-\item{valid_split}{(\verb{[0, 1)}) The fraction of the dataset used for validation.
+\item{valid_split}{In [0, 1). The fraction of the dataset used for validation.
 (default = 0 means no split)}
 
 \item{learn_rate}{initial learning rate for the optimizer.}
diff --git a/man/tabnet_fit.Rd b/man/tabnet_fit.Rd
index e62c2d4..729a8f9 100644
--- a/man/tabnet_fit.Rd
+++ b/man/tabnet_fit.Rd
@@ -58,6 +58,8 @@ tabnet_fit(x, ...)
 \item A \strong{matrix} of predictors.
 \item A \strong{recipe} specifying a set of preprocessing steps
 created from \code{\link[recipes:recipe]{recipes::recipe()}}.
+\item A \strong{Node} object, where the tree will be used as the hierarchical outcome
+and the node attributes as predictors.
 }
 
 The predictor data should be standardized (e.g. centered or scaled).
diff --git a/man/tabnet_nn.Rd b/man/tabnet_nn.Rd
index 3ea4339..3f042df 100644
--- a/man/tabnet_nn.Rd
+++ b/man/tabnet_nn.Rd
@@ -52,7 +52,7 @@ specific size.}
 \item{virtual_batch_size}{Batch size for Ghost Batch Normalization.}
 
-\item{momentum}{Float value between 0 and 1 which will be used for momentum in all batch norm.}
+\item{momentum}{Numeric value between 0 and 1, used for momentum in all batch norm.}
 
 \item{mask_type}{Either "sparsemax" or "entmax" : this is the masking function to use.}
 }
 
diff --git a/man/tabnet_pretrain.Rd b/man/tabnet_pretrain.Rd
index ca38c59..0d85757 100644
--- a/man/tabnet_pretrain.Rd
+++ b/man/tabnet_pretrain.Rd
@@ -55,6 +55,8 @@ tabnet_pretrain(x, ...)
 \item A \strong{matrix} of predictors.
 \item A \strong{recipe} specifying a set of preprocessing steps
 created from \code{\link[recipes:recipe]{recipes::recipe()}}.
+\item A \strong{Node} object, where the tree will be used as the hierarchical outcome
+and the node attributes as predictors.
 }
 
 The predictor data should be standardized (e.g. centered or scaled).
diff --git a/tests/spelling.R b/tests/spelling.R
new file mode 100644
index 0000000..6713838
--- /dev/null
+++ b/tests/spelling.R
@@ -0,0 +1,3 @@
+if(requireNamespace('spelling', quietly = TRUE))
+  spelling::spell_check_test(vignettes = TRUE, error = FALSE,
+                             skip_on_cran = TRUE)
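
Usage note: a minimal sketch of the new Node input documented above, assuming
tabnet_fit() dispatches on a data.tree Node the same way it does on the
data-frame, matrix, and recipe inputs, and that the acme example tree shipped
with data.tree carries numeric node attributes (cost, p) usable as predictors.
The epochs and valid_split values are illustrative only.

    # minimal sketch, assumptions as noted above
    library(tabnet)
    library(data.tree)

    data(acme)   # small company hierarchy shipped with data.tree

    # the tree structure supplies the hierarchical outcome;
    # the node attributes (here cost and p) supply the predictors
    fit <- tabnet_fit(acme, epochs = 5, valid_split = 0.2)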