From 9da9e4ebd1ff1fedc2d444b8fe02f40bb02986d6 Mon Sep 17 00:00:00 2001 From: cregouby Date: Wed, 27 Sep 2023 11:01:39 +0200 Subject: [PATCH] Add french FR translation (#131) * switch to base:: messaging system for the translation and add FR translation --- DESCRIPTION | 1 - NEWS.md | 1 + R/dials.R | 2 +- R/explain.R | 7 +- R/hardhat.R | 49 +++--- R/model.R | 26 ++-- R/parsnip.R | 2 +- R/pretraining.R | 20 ++- R/tab-network.R | 10 +- po/R-fr.po | 190 +++++++++++++++++++++++ po/R-tabnet.pot | 163 +++++++++++++++++++ po/fr.mo | Bin 0 -> 4874 bytes po/fr.po | 130 ++++++++++++++++ tests/testthat/test-hardhat_parameters.R | 9 +- tests/testthat/test_translations.R | 24 +++ 15 files changed, 571 insertions(+), 63 deletions(-) create mode 100644 po/R-fr.po create mode 100644 po/R-tabnet.pot create mode 100644 po/fr.mo create mode 100644 po/fr.po create mode 100644 tests/testthat/test_translations.R diff --git a/DESCRIPTION b/DESCRIPTION index 7948fada..1a1969f1 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -22,7 +22,6 @@ Imports: torch (>= 0.4.0), hardhat (>= 1.3.0), magrittr, - glue, progress, rlang, methods, diff --git a/NEWS.md b/NEWS.md index 98d6f80d..a47a2eee 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,6 +1,7 @@ # tabnet (development version) ## New features +* add FR translation (#131) * `tabnet_pretrain()` now allows different GLU blocks in GLU layers in encoder and in decoder through the `config()` parameters `num_idependant_decoder` and `num_shared_decoder` (#129) * {tabnet} now allows hierarchical multi-label classification through {data.tree} hierarchical `Node` dataset. (#126) * Add `reduce_on_plateau` as option for `lr_scheduler` at `tabnet_config()` (@SvenVw, #120) diff --git a/R/dials.R b/R/dials.R index 48cd9eb0..0c49d2d1 100644 --- a/R/dials.R +++ b/R/dials.R @@ -1,6 +1,6 @@ check_dials <- function() { if (!requireNamespace("dials", quietly = TRUE)) - rlang::abort("Package \"dials\" needed for this function to work. 
Please install it.") + stop("Package \"dials\" needed for this function to work. Please install it.", call. = FALSE) } diff --git a/R/explain.R b/R/explain.R index 28fe1b26..2394efe7 100644 --- a/R/explain.R +++ b/R/explain.R @@ -44,10 +44,9 @@ tabnet_explain <- function(object, new_data) { #' @export #' @rdname tabnet_explain tabnet_explain.default <- function(object, new_data) { - stop( - "`tabnet_explain()` is not defined for a '", class(object)[1], "'.", - call. = FALSE - ) + stop(domain=NA, + gettextf("`tabnet_explain()` is not defined for a '%s'.", class(object)[1]), + call. = FALSE) } #' @export diff --git a/R/hardhat.R b/R/hardhat.R index 568cd50a..24e47aef 100644 --- a/R/hardhat.R +++ b/R/hardhat.R @@ -104,10 +104,9 @@ tabnet_fit <- function(x, ...) { #' @export #' @rdname tabnet_fit tabnet_fit.default <- function(x, ...) { - stop( - "`tabnet_fit()` is not defined for a '", class(x)[1], "'.", - call. = FALSE - ) + stop(domain=NA, + gettextf("`tabnet_fit()` is not defined for a '%s'.", class(x)[1]), + call. = FALSE) } #' @export @@ -293,10 +292,9 @@ tabnet_pretrain <- function(x, ...) { #' @export #' @rdname tabnet_pretrain tabnet_pretrain.default <- function(x, ...) { - stop( - "`tabnet_pretrain()` is not defined for a '", class(x)[1], "'.", - call. = FALSE - ) + stop(domain=NA, + gettextf("`tabnet_pretrain()` is not defined for a '%s'.", class(x)[1]), + call. = FALSE) } @@ -390,13 +388,14 @@ tabnet_bridge <- function(processed, config = tabnet_config(), tabnet_model, fro epoch_shift <- 0L if (!(is.null(tabnet_model) || inherits(tabnet_model, "tabnet_fit") || inherits(tabnet_model, "tabnet_pretrain"))) - rlang::abort(glue::glue("{tabnet_model} is not recognised as a proper TabNet model")) + stop(gettextf("'%s' is not recognised as a proper TabNet model", tabnet_model), + call. 
= FALSE) if (!is.null(from_epoch) && !is.null(tabnet_model)) { # model must be loaded from checkpoint if (from_epoch > (length(tabnet_model$fit$checkpoints) * tabnet_model$fit$config$checkpoint_epoch)) - rlang::abort(glue::glue("The model was trained for less than {from_epoch} epochs")) + stop(gettextf("The model was trained for less than '%s' epochs", from_epoch), call. = FALSE) # find closest checkpoint for that epoch closest_checkpoint <- from_epoch %/% tabnet_model$fit$config$checkpoint_epoch @@ -408,7 +407,7 @@ tabnet_bridge <- function(processed, config = tabnet_config(), tabnet_model, fro } if (task == "supervised") { if (sum(is.na(outcomes)) > 0) { - rlang::abort(glue::glue("Error: found missing values in the `{names(outcomes)}` outcome column.")) + stop(gettextf("Found missing values in the `%s` outcome column.", names(outcomes)), call. = FALSE) } if (is.null(tabnet_model)) { # new supervised model needs network initialization @@ -418,7 +417,7 @@ tabnet_bridge <- function(processed, config = tabnet_config(), tabnet_model, fro } else if (!check_net_is_empty_ptr(tabnet_model) && inherits(tabnet_model, "tabnet_fit")) { # resume training from supervised if (!identical(processed$blueprint, tabnet_model$blueprint)) - rlang::abort("Model dimensions don't match.") + stop("Model dimensions don't match.", call. = FALSE) # model is available from tabnet_model$serialized_net m <- reload_model(tabnet_model$serialized_net) @@ -443,7 +442,7 @@ tabnet_bridge <- function(processed, config = tabnet_config(), tabnet_model, fro tabnet_model$fit$network <- reload_model(tabnet_model$fit$checkpoints[[last_checkpoint]]) epoch_shift <- last_checkpoint * tabnet_model$fit$config$checkpoint_epoch - } else rlang::abort(glue::glue("No model serialized weight can be found in {tabnet_model} check the model history")) + } else stop(gettextf("No model serialized weight can be found in `%s`, check the model history", tabnet_model), call. 
= FALSE) fit_lst <- tabnet_train_supervised(tabnet_model, predictors, outcomes, config = config, epoch_shift) return(new_tabnet_fit(fit_lst, blueprint = processed$blueprint)) @@ -485,7 +484,7 @@ predict_tabnet_bridge <- function(type, object, predictors, epoch, batch_size) { if (!is.null(epoch)) { if (epoch > (length(object$fit$checkpoints) * object$fit$config$checkpoint_epoch)) - rlang::abort(glue::glue("The model was trained for less than {epoch} epochs")) + stop(gettextf("The model was trained for less than `%s` epochs", epoch), call. = FALSE) # find closest checkpoint for that epoch ind <- epoch %/% object$fit$config$checkpoint_epoch @@ -530,7 +529,7 @@ model_pretrain_to_fit <- function(obj, x, y, config = tabnet_config()) { m <- reload_model(obj$serialized_net) if (m$input_dim != tabnet_model_lst$network$input_dim) - rlang::abort("Model dimensions don't match.") + stop("Model dimensions don't match.", call. = FALSE) # perform update of selected weights into new tabnet_model m_stat_dict <- m$state_dict() @@ -582,7 +581,7 @@ check_type <- function(outcome_ptype, type = NULL) { outcome_all_numeric <- all(purrr::map_lgl(outcome_ptype, is.numeric)) if (!outcome_all_numeric && !outcome_all_factor) - rlang::abort(glue::glue("Mixed multi-outcome type '{unique(purrr::map_chr(outcome_ptype, ~class(.x)[[1]]))}' is not supported")) + stop(gettextf("Mixed multi-outcome type '%s' is not supported", unique(purrr::map_chr(outcome_ptype, ~class(.x)[[1]]))), call. = FALSE) if (is.null(type)) { if (outcome_all_factor) @@ -590,17 +589,17 @@ check_type <- function(outcome_ptype, type = NULL) { else if (outcome_all_numeric) type <- "numeric" else if (ncol(outcome_ptype) == 1) - rlang::abort(glue::glue("Unknown outcome type '{class(outcome_ptype)}'")) + stop(gettextf("Unknown outcome type '%s'", class(outcome_ptype)), call. 
= FALSE) } type <- rlang::arg_match(type, c("numeric", "prob", "class")) if (outcome_all_factor) { if (!type %in% c("prob", "class")) - rlang::abort(glue::glue("Outcome is factor and the prediction type is '{type}'.")) + stop(gettextf("Outcome is factor and the prediction type is '%s'.", type), call. = FALSE) } else if (outcome_all_numeric) { if (type != "numeric") - rlang::abort(glue::glue("Outcome is numeric and the prediction type is '{type}'.")) + stop(gettextf("Outcome is numeric and the prediction type is '%s'.", type), call. = FALSE) } invisible(type) @@ -638,15 +637,15 @@ check_compliant_node <- function(node) { reserved_names <- c(paste0("level_", c(1:node_height)), data.tree::NODE_RESERVED_NAMES_CONST) actual_names <- colnames(node)[!colnames(node) %in% "pathString"] } else { - rlang::abort("The provided hierarchical object is not recognized with a valid format that can be checked") + stop("The provided hierarchical object is not recognized with a valid format that can be checked", call. = FALSE) } if (any(actual_names %in% reserved_names)) { - rlang::abort(paste0( - "The attributes or colnames in the provided hierarchical object use the following reserved names : '", - paste(actual_names[actual_names %in% reserved_names], collapse = "', '"), - "'. Please change those names as they will lead to unexpected tabnet behavior." - )) + stop(domain=NA, + gettextf("The attributes or colnames in the provided hierarchical object use the following reserved names : '%s'. Please change those names as they will lead to unexpected tabnet behavior.", + paste(actual_names[actual_names %in% reserved_names], collapse = "', '") + ), + call. 
= FALSE) } invisible(node) diff --git a/R/model.R b/R/model.R index 4d9bfbce..b2bf32f8 100644 --- a/R/model.R +++ b/R/model.R @@ -239,7 +239,7 @@ resolve_loss <- function(config, dtype) { # cross entropy loss is required loss_fn <- torch::nn_cross_entropy_loss() else - rlang::abort(glue::glue("{loss} is not a valid loss for outcome of type {dtype}")) + stop(gettextf("`%s` is not a valid loss for outcome of type %s", loss, dtype), call. = FALSE) loss_fn } @@ -250,7 +250,7 @@ resolve_early_stop_monitor <- function(early_stopping_monitor, valid_split) { else if (early_stopping_monitor %in% c("train_loss", "auto")) early_stopping_monitor <- "train_loss" else - rlang::abort(glue::glue("{early_stopping_monitor} is not a valid early-stopping metric to monitor with `valid_split` = {valid_split}")) + stop(gettextf("%s is not a valid early-stopping metric to monitor with `valid_split` = %s", early_stopping_monitor, valid_split), call. = FALSE) early_stopping_monitor } @@ -506,7 +506,7 @@ tabnet_train_supervised <- function(obj, x, y, config = tabnet_config(), epoch_s if (config$optimizer == "adam") optimizer <- torch::optim_adam(network$parameters, lr = config$learn_rate) else - rlang::abort("Currently only the 'adam' optimizer is supported.") + stop("Currently only the 'adam' optimizer is supported.", call. = FALSE) } @@ -520,7 +520,7 @@ tabnet_train_supervised <- function(obj, x, y, config = tabnet_config(), epoch_s } else if (config$lr_scheduler == "step") { scheduler <- torch::lr_step(optimizer, config$step_size, config$lr_decay) } else { - rlang::abort("Currently only the 'step' and 'reduce_on_plateau' scheduler are supported.") + stop("Currently only the 'step' and 'reduce_on_plateau' scheduler are supported.", call. 
= FALSE) } # restore previous metrics & checkpoints @@ -565,12 +565,11 @@ tabnet_train_supervised <- function(obj, x, y, config = tabnet_config(), epoch_s metrics[[epoch]][["valid"]] <- transpose_metrics(valid_metrics) } - message <- sprintf("[Epoch %03d] Loss: %3f", epoch, mean(metrics[[epoch]]$train$loss)) - if (has_valid) - message <- paste0(message, sprintf(" Valid loss: %3f", mean(metrics[[epoch]]$valid$loss))) + if (config$verbose & !has_valid) + message(gettextf("[Epoch %03d] Loss: %3f", epoch, mean(metrics[[epoch]]$train$loss))) + if (config$verbose & has_valid) + message(gettextf("[Epoch %03d] Loss: %3f, Valid loss: %3f", epoch, mean(metrics[[epoch]]$train$loss), mean(metrics[[epoch]]$valid$loss))) - if (config$verbose) - rlang::inform(message) # Early-stopping checks if (config$early_stopping && config$early_stopping_monitor=="valid_loss"){ @@ -585,7 +584,7 @@ tabnet_train_supervised <- function(obj, x, y, config = tabnet_config(), epoch_s patience_counter <- patience_counter + 1 if (patience_counter >= config$early_stopping_patience){ if (config$verbose) - rlang::inform(sprintf("Early stopping at epoch %03d", epoch)) + message(gettextf("Early stopping at epoch %03d", epoch)) break } } else { @@ -610,9 +609,10 @@ tabnet_train_supervised <- function(obj, x, y, config = tabnet_config(), epoch_s if(!config$skip_importance) { importance_sample_size <- config$importance_sample_size if (is.null(config$importance_sample_size) && train_ds$.length() > 1e5) { - rlang::warn(c(glue::glue("Computing importances for a dataset with size {train_ds$.length()}."), - "This can consume too much memory. We are going to use a sample of size 1e5", - "You can disable this message by using the `importance_sample_size` argument.")) + warning( + gettextf( + "Computing importances for a dataset with size %s. This can consume too much memory. 
We are going to use a sample of size 1e5, You can disable this message by using the `importance_sample_size` argument.", + train_ds$.length())) importance_sample_size <- 1e5 } indexes <- as.numeric(torch::torch_randint( diff --git a/R/parsnip.R b/R/parsnip.R index dbbc41c2..ccb867fc 100644 --- a/R/parsnip.R +++ b/R/parsnip.R @@ -242,7 +242,7 @@ tabnet <- function(mode = "unknown", epochs = NULL, penalty = NULL, batch_size = num_independent = NULL, num_shared = NULL, momentum = NULL) { if (!requireNamespace("parsnip", quietly = TRUE)) - rlang::abort("Package \"parsnip\" needed for this function to work. Please install it.") + stop("Package \"parsnip\" needed for this function to work. Please install it.", call. = FALSE) if (!tabnet_env$parsnip_added) { add_parsnip_tabnet() diff --git a/R/pretraining.R b/R/pretraining.R index 861a6b4f..6fe9571d 100644 --- a/R/pretraining.R +++ b/R/pretraining.R @@ -136,7 +136,7 @@ tabnet_train_unsupervised <- function(x, config = tabnet_config(), epoch_shift = if (config$optimizer == "adam") optimizer <- torch::optim_adam(network$parameters, lr = config$learn_rate) else - rlang::abort("Currently only the 'adam' optimizer is supported.") + stop("Currently only the 'adam' optimizer is supported.", call. = FALSE) } @@ -150,7 +150,7 @@ tabnet_train_unsupervised <- function(x, config = tabnet_config(), epoch_shift = } else if (config$lr_scheduler == "step") { scheduler <- torch::lr_step(optimizer, config$step_size, config$lr_decay) } else { - rlang::abort("Currently only the 'step' and 'reduce_on_plateau' scheduler are supported.") + stop("Currently only the 'step' and 'reduce_on_plateau' scheduler are supported.", call. 
= FALSE) } # initialize metrics & checkpoints @@ -195,12 +195,10 @@ tabnet_train_unsupervised <- function(x, config = tabnet_config(), epoch_shift = metrics[[epoch]][["valid"]] <- transpose_metrics(valid_metrics) } - message <- sprintf("[Epoch %03d] Loss: %3f", epoch, mean(metrics[[epoch]]$train$loss)) - if (has_valid) - message <- paste0(message, sprintf(" Valid loss: %3f", mean(metrics[[epoch]]$valid$loss))) - - if (config$verbose) - rlang::inform(message) + if (config$verbose & !has_valid) + message(gettextf("[Epoch %03d] Loss: %3f", epoch, mean(metrics[[epoch]]$train$loss))) + if (config$verbose & has_valid) + message(gettextf("[Epoch %03d] Loss: %3f, Valid loss: %3f", epoch, mean(metrics[[epoch]]$train$loss), mean(metrics[[epoch]]$valid$loss))) # Early-stopping checks if (config$early_stopping && config$early_stopping_monitor=="valid_loss"){ @@ -240,9 +238,9 @@ tabnet_train_unsupervised <- function(x, config = tabnet_config(), epoch_shift = importance_sample_size <- config$importance_sample_size if (is.null(config$importance_sample_size) && train_ds$.length() > 1e5) { - rlang::warn(c(glue::glue("Computing importances for a dataset with size {train_ds$.length()}."), - "This can consume too much memory. We are going to use a sample of size 1e5", - "You can disable this message by using the `importance_sample_size` argument.")) + warning(domain=NA, + gettextf("Computing importances for a dataset with size %s. This can consume too much memory. We are going to use a sample of size 1e5. You can disable this message by using the `importance_sample_size` argument.", train_ds$.length()), + call. 
= FALSE) importance_sample_size <- 1e5 } indexes <- as.numeric(torch::torch_randint( diff --git a/R/tab-network.R b/R/tab-network.R index 5dea1193..a8ffc721 100644 --- a/R/tab-network.R +++ b/R/tab-network.R @@ -266,9 +266,9 @@ tabnet_pretrainer <- torch::nn_module( self$initial_bn <- torch::nn_batch_norm1d(self$input_dim, momentum = momentum) if (self$n_steps <= 0) - stop("n_steps should be a positive integer.") + stop("'n_steps' should be a positive integer.") if (self$n_independent == 0 && self$n_shared == 0) - stop("n_shared and n_independant can't be both zero.") + stop("'n_shared' and 'n_independant' can't be both zero.") self$virtual_batch_size <- virtual_batch_size self$embedder <- embedding_generator(input_dim, cat_dims, cat_idxs, cat_emb_dim) @@ -452,9 +452,9 @@ tabnet_nn <- torch::nn_module( self$mask_type <- mask_type if (self$n_steps <= 0) - stop("n_steps should be a positive integer.") + stop("'n_steps' should be a positive integer.") if (self$n_independent == 0 && self$n_shared == 0) - stop("n_shared and n_independant can't be both zero.") + stop("'n_shared' and 'n_independant' can't be both zero.") self$virtual_batch_size <- virtual_batch_size self$embedder <- embedding_generator(input_dim, cat_dims, cat_idxs, cat_emb_dim) @@ -494,7 +494,7 @@ attentive_transformer <- torch::nn_module( else if (mask_type == "entmax") self$selector <- entmax(dim = -1) else - stop("Please choose either sparsemax or entmax as masktype") + stop("Please choose either 'sparsemax' or 'entmax' as 'mask_type'") }, forward = function(priors, processed_feat) { diff --git a/po/R-fr.po b/po/R-fr.po new file mode 100644 index 00000000..11d69ef4 --- /dev/null +++ b/po/R-fr.po @@ -0,0 +1,190 @@ +msgid "" +msgstr "" +"Project-Id-Version: tabnet 0.4.0.9000\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-09-26 20:16+0200\n" +"PO-Revision-Date: 2023-09-26 21:50+0200\n" +"Last-Translator: Christophe Regouby \n" +"Language-Team: fr\n" +"Language: fr\n" +"MIME-Version: 1.0\n" 
+"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 3.0.1\n" + +#: dials.R:3 +msgid "Package \"dials\" needed for this function to work. Please install it." +msgstr "" +"Le package \"dials\" doit être installé pour cette fonction. Merci de " +"l'installer." + +#: explain.R:48 +#, c-format +msgid "`tabnet_explain()` is not defined for a '%s'." +msgstr "`tabnet_explain()` n’est pas défini pour un '%s'." + +#: hardhat.R:108 +#, c-format +msgid "`tabnet_fit()` is not defined for a '%s'." +msgstr "`tabnet_fit()` n’est pas défini pour un '%s'." + +#: hardhat.R:296 +#, c-format +msgid "`tabnet_pretrain()` is not defined for a '%s'." +msgstr "`tabnet_pretrain()` n’est pas défini pour un '%s'." + +#: hardhat.R:391 +#, c-format +msgid "'%s' is not recognised as a proper TabNet model" +msgstr "'%s' n’est pas reconnu comme un modèle TabNet correct" + +#: hardhat.R:398 +#, c-format +msgid "The model was trained for less than '%s' epochs" +msgstr "Le modèle a été entrainé sur moins de '%s' époques" + +#: hardhat.R:410 +#, c-format +msgid "Found missing values in the `%s` outcome column." +msgstr "Il y a des valeurs manquantes dans la colonne de résultats `%s`." + +#: hardhat.R:420 hardhat.R:532 +msgid "Model dimensions don't match." +msgstr "Les dimensions ne correspondent pas entre les modèles." 
+ +#: hardhat.R:445 +#, c-format +msgid "" +"No model serialized weight can be found in `%s`, check the model history" +msgstr "" +"Il n’y a pas de points sérialisés de modèle dans `%s`, veuillez vérifier " +"l’historique du modèle" + +#: hardhat.R:487 +#, c-format +msgid "The model was trained for less than `%s` epochs" +msgstr "Le modèle a été entrainé sur moins de '%s' époques" + +#: hardhat.R:584 +#, c-format +msgid "Mixed multi-outcome type '%s' is not supported" +msgstr "Le type '%s' n’est pas supporté pour des modèles multi-résultat" + +#: hardhat.R:592 +#, c-format +msgid "Unknown outcome type '%s'" +msgstr "Le type `%s` est inconnu pour une colonne de résultat" + +#: hardhat.R:599 +#, c-format +msgid "Outcome is factor and the prediction type is '%s'." +msgstr "" +"La colonne de résultats est catégorielle et la prédiction est de type '%s'." + +#: hardhat.R:602 +#, c-format +msgid "Outcome is numeric and the prediction type is '%s'." +msgstr "" +"La colonne de résultats est numérique et la prédiction est de type '%s'." + +#: hardhat.R:640 +msgid "" +"The provided hierarchical object is not recognized with a valid format that " +"can be checked" +msgstr "" +"L’objet hiérarchique fourni n’est pas reconnu dans un format valide qui " +"peut être vérifié" + +#: hardhat.R:645 +#, c-format +msgid "" +"The attributes or colnames in the provided hierarchical object use the " +"following reserved names : '%s'. Please change those names as they will lead " +"to unexpected tabnet behavior." +msgstr "" +"Les attributs ou noms de colonnes dans l’objet hiérarchique fourni utilisent " +"les noms réservés suivants : ‘%s’. Veuillez changer ces noms pour éviter un " +"comportement imprévisible de TabNet." 
+ +#: model.R:242 +#, c-format +msgid "`%s` is not a valid loss for outcome of type %s" +msgstr "" +"`%s` n’est pas une fonction objectif valide pour les colonnes de résultat de " +"type %s" + +#: model.R:253 +#, c-format +msgid "" +"%s is not a valid early-stopping metric to monitor with `valid_split` = %s" +msgstr "" +"%s n’est pas une métrique valide d’arrêt anticipé avec `valid_split = %s`" + +#: model.R:509 pretraining.R:139 +msgid "Currently only the 'adam' optimizer is supported." +msgstr "Seule la fonction d’optimisation ‘adam’ est supportée pour l’instant." + +#: model.R:523 pretraining.R:153 +msgid "" +"Currently only the 'step' and 'reduce_on_plateau' scheduler are supported." +msgstr "" +"Seules les planifications ‘step’ et ‘reduce_on_plateau’ sont supportées pour " +"l’instant." + +#: model.R:569 pretraining.R:199 +#, c-format +msgid "[Epoch %03d] Loss: %3f" +msgstr "" + +#: model.R:571 pretraining.R:201 +#, c-format +msgid "[Epoch %03d] Loss: %3f, Valid loss: %3f" +msgstr "" + +#: model.R:587 +#, c-format +msgid "Early stopping at epoch %03d" +msgstr "Arrêt anticipé à l’époque %03d" + +#: model.R:614 +#, c-format +msgid "" +"Computing importances for a dataset with size %s. This can consume too much " +"memory. We are going to use a sample of size 1e5, You can disable this " +"message by using the `importance_sample_size` argument." +msgstr "" +"Calcul de l’importance sur un jeu de données de taille %s. Il se peut que " +"cela consomme trop de mémoire. Aussi le jeu de données sera réduit " +"aléatoirement à une taille de 1e5. Vous pouvez rendre ce message silencieux " +"en configurant l’argument `importance_sample_size`." + +#: parsnip.R:245 +msgid "" +"Package \"parsnip\" needed for this function to work. Please install it." +msgstr "" +"Le package \"parsnip\" est nécessaire pour exécuter cette fonction. Veuillez " +"l'installer." + +#: pretraining.R:242 +#, c-format +msgid "" +"Computing importances for a dataset with size %s. 
This can consume too much " +"memory. We are going to use a sample of size 1e5. You can disable this " +"message by using the `importance_sample_size` argument." +msgstr "" +"Calcul de l’importance sur un jeu de données de taille %s. Il se peut que " +"cela consomme trop de mémoire. Aussi le jeu de donnée sera réduit " +"aléatoirement à une taille de 1e5. Vous pouvez rendre ce message silencieux " +"en configurant l’argument `importance_sample_size`." + +#: tab-network.R:269 tab-network.R:455 +msgid "'n_steps' should be a positive integer." +msgstr "’n_steps’ doit être un nombre entier positif." + +#: tab-network.R:271 tab-network.R:457 +msgid "'n_shared' and 'n_independant' can't be both zero." +msgstr "’n_shared’ et ’n_independant’ ne peuvent pas être nuls simultanément." + +#: tab-network.R:497 +msgid "Please choose either 'sparsemax' or 'entmax' as 'mask_type'" +msgstr "Vous devez choisir ‘sparsemax’ ou ‘entmax’ pour ‘mask_type’" diff --git a/po/R-tabnet.pot b/po/R-tabnet.pot new file mode 100644 index 00000000..3707e568 --- /dev/null +++ b/po/R-tabnet.pot @@ -0,0 +1,163 @@ +msgid "" +msgstr "" +"Project-Id-Version: tabnet 0.4.0.9000\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-09-26 19:59+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=CHARSET\n" +"Content-Transfer-Encoding: 8bit\n" + +#: dials.R:3 +msgid "Package \"dials\" needed for this function to work. Please install it." +msgstr "" + +#: explain.R:48 +#, c-format +msgid "`tabnet_explain()` is not defined for a '%s'." +msgstr "" + +#: hardhat.R:108 +#, c-format +msgid "`tabnet_fit()` is not defined for a '%s'." +msgstr "" + +#: hardhat.R:296 +#, c-format +msgid "`tabnet_pretrain()` is not defined for a '%s'." 
+msgstr "" + +#: hardhat.R:391 +#, c-format +msgid "'%s' is not recognised as a proper TabNet model" +msgstr "" + +#: hardhat.R:398 +#, c-format +msgid "The model was trained for less than '%s' epochs" +msgstr "" + +#: hardhat.R:410 +#, c-format +msgid "Found missing values in the `%s` outcome column." +msgstr "" + +#: hardhat.R:420 hardhat.R:532 +msgid "Model dimensions don't match." +msgstr "" + +#: hardhat.R:445 +#, c-format +msgid "" +"No model serialized weight can be found in `%s`, check the model history" +msgstr "" + +#: hardhat.R:487 +#, c-format +msgid "The model was trained for less than `%s` epochs" +msgstr "" + +#: hardhat.R:584 +#, c-format +msgid "Mixed multi-outcome type '%s' is not supported" +msgstr "" + +#: hardhat.R:592 +#, c-format +msgid "Unknown outcome type '%s'" +msgstr "" + +#: hardhat.R:599 +#, c-format +msgid "Outcome is factor and the prediction type is '%s'." +msgstr "" + +#: hardhat.R:602 +#, c-format +msgid "Outcome is numeric and the prediction type is '%s'." +msgstr "" + +#: hardhat.R:640 +msgid "" +"The provided hierarchical object is not recognized with a valid format that " +"can be checked" +msgstr "" + +#: hardhat.R:645 +#, c-format +msgid "" +"The attributes or colnames in the provided hierarchical object use the " +"following reserved names : '%s'. Please change those names as they will lead " +"to unexpected tabnet behavior." +msgstr "" + +#: model.R:242 +#, c-format +msgid "`%s` is not a valid loss for outcome of type %s" +msgstr "" + +#: model.R:253 +#, c-format +msgid "" +"%s is not a valid early-stopping metric to monitor with `valid_split` = %s" +msgstr "" + +#: model.R:509 pretraining.R:139 +msgid "Currently only the 'adam' optimizer is supported." +msgstr "" + +#: model.R:523 pretraining.R:153 +msgid "" +"Currently only the 'step' and 'reduce_on_plateau' scheduler are supported." 
+msgstr "" + +#: model.R:569 pretraining.R:199 +#, c-format +msgid "[Epoch %03d] Loss: %3f" +msgstr "" + +#: model.R:571 pretraining.R:201 +#, c-format +msgid "[Epoch %03d] Loss: %3f, Valid loss: %3f" +msgstr "" + +#: model.R:587 +#, c-format +msgid "Early stopping at epoch %03d" +msgstr "" + +#: model.R:614 +#, c-format +msgid "" +"Computing importances for a dataset with size %s. This can consume too much " +"memory. We are going to use a sample of size 1e5, You can disable this " +"message by using the `importance_sample_size` argument." +msgstr "" + +#: parsnip.R:245 +msgid "" +"Package \"parsnip\" needed for this function to work. Please install it." +msgstr "" + +#: pretraining.R:242 +#, c-format +msgid "" +"Computing importances for a dataset with size %s. This can consume too much " +"memory. We are going to use a sample of size 1e5. You can disable this " +"message by using the `importance_sample_size` argument." +msgstr "" + +#: tab-network.R:269 tab-network.R:455 +msgid "'n_steps' should be a positive integer." +msgstr "" + +#: tab-network.R:271 tab-network.R:457 +msgid "'n_shared' and 'n_independant' can't be both zero." 
+msgstr "" + +#: tab-network.R:497 +msgid "Please choose either 'sparsemax' or 'entmax' as 'mask_type'" +msgstr "" diff --git a/po/fr.mo b/po/fr.mo new file mode 100644 index 0000000000000000000000000000000000000000..85db45ef64fd8d8a86695edfef27bb3435ba402a GIT binary patch literal 4874 zcmeH~ON<;x8OI96|-Uy^Ruo@yVll9sddqWOZFvO8xgiUO??ACPG>{Q%S)#<8U zd(Fic#07~9NJenTkp+nZXW%lI2yuWDkw6?GBrZq@4oIBf_f_}IP8=sT2#E_V&HTH2 zs=oRj|L-w>c;tcaDz1<7`2?Ro^I7Nfv$ycYb@{DIJp+Cjd>s4%xC#CW9Dx4>e+drW z*5Tj4V?6)-fo|VlK#{xkcBMWCUI9nox4}<>zW|>Ge-Db>^>-+B9J~rX0e%boDflud z=YRdd4u1@OjpyHi9|KRmvwMFT9P)e%d=&f%DCfNb{uO-iA#4Kw0e&8QW=*L};5R`z z|F__Wuzl@a9qNaby2$&J?^eoy-vB=a{t0{reE2<`{#QV;?JOO?mJPrODln{M@k6bKQAI*2@ z-6Jl`7bd9>_xHFt$66jaI$xNgFgIU>PocWPJ^8}D>Vtd;qaxIbWAlYO6|PYq=^hnk zr3O) zKo`ajk{(+<4xD$>Her;vBwKS%oQAe4HB3(dhuU*V|6>b)RhaYq~F z>}Y0nWp$Y7Eb5s3>|^??q0OR77tNR-lgOUg7|kBm9I!Ksm|DGiq~1;^X-CBDa?&13 z`1HRuW%OS*rEMB)st$DUd?g$(Fr}Fdbf}Y?VR$3_iES-0vn9sgWk-YnJ!9ZbTiBi8 zck0R{Yg%|wOnF%$NrJT^>a5UOFKKO(wsj!9>l?>Qb+&0jvjss~h&XdG2MC6@u*UhU z>F#>m)uByAfE-1r+L<4z3+@ICpS4wT!^LmupvJD1Tv#fhE(q^+>4&qsPag>^F38y zFf;Mmj+{GC+bClam1W!}e9TH*>J3S<@HZ7>-<7yv>TF}0V(JQ0=`eo57AfbR*fR-L z6|PG*H5RY!v%C|s)!C(&IvP*zrKxDztw13p98dbdd^?-;ksYacS%u|73c*o1XTfZ#I6OSDQ72 zJX0c?wF~DioIN`B*l1(zsX*iL$bj1@VsE6!vwNN(IV2m>lh18`ad_%zot!nX&G4)* zLJ57h^r^8+YgdO~vL3c)B`ybYHR?AzjQ-TKvAx7Q;ZUL#hxp5!+x{X0vh;MjH_&t#p^i<0cyiW2XnW*R>~?3!FBPddBoD+6|St5Ly+ z*_eGup%C=FenM+7RnbyRt*+Q})JLiLjcERYZRJNQET>RbxKD;$@7n!2sj{M7(&E6v zRwj4;Y1Vyy3{xZZ z70X1R3GpQOuKP)O9FIV+%*Tz9=kx(YZp7>B|B)dGuMz zLl6fZ%{LsEOGc~%qkUfoKv+-HT#6YfOpx*>W=MHM$C^1C{o?#E4?|#ny(mc8s%{M0 zx2@#$O}#H`CsHNESKTy}>toq2j#B5}$Pp5C;y6Ft3yrfD$;4XBRhUL+mRC5E*-D->N9EOW#NurZ#$w1~ z#%iUMW+(Pk;68JMwK7*FMlE#BvzF+~wAt{FPxD8r;-6fOSe78+X4sO5#MbRYvj_`0 zJ#&W|w+IN`kFkr{j#%fNouc!@eV2Gmm8Q|;SwB}yia4UBVavZJ9Nl%DOwUE!ji&3- z)`Ub1Alym4r%cNNK?1#O7Z`11BcYizo1t%tkX8-^I literal 0 HcmV?d00001 diff --git a/po/fr.po b/po/fr.po new file mode 100644 index 00000000..89b39284 --- /dev/null +++ b/po/fr.po @@ -0,0 +1,130 
@@
+msgid ""
+msgstr ""
+"Project-Id-Version: tabnet 0.4.0.9000\n"
+"POT-Creation-Date: 2023-09-26 13:49\n"
+"PO-Revision-Date: 2023-09-26 14:28+0200\n"
+"Last-Translator: \n"
+"Language-Team: \n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 3.3.2\n"
+
+msgid "`tabnet_explain()` is not defined for a '%s'."
+msgstr "`tabnet_explain()` n’est pas défini pour un ‘%s’."
+
+msgid "`tabnet_fit()` is not defined for a '%s'."
+msgstr "`tabnet_fit()` n’est pas défini pour un ‘%s’."
+
+msgid "`tabnet_pretrain()` is not defined for a '%s'."
+msgstr "`tabnet_pretrain()` n’est pas défini pour un ‘%s’."
+
+msgid "'%s' is not recognised as a proper TabNet model"
+msgstr "‘%s’ n’est pas reconnu comme un modèle TabNet correct"
+
+msgid "The model was trained for less than '%s' epochs"
+msgstr "Le modèle a été entrainé sur moins de ‘%s’ époques"
+
+msgid "Error: found missing values in the `%s` outcome column."
+msgstr ""
+"Erreur : Il y a des valeurs manquantes dans la colonne de résultats `%s`."
+
+msgid "Model dimensions don't match."
+msgstr "Les dimensions ne correspondent pas entre les modèles."
+
+msgid ""
+"No model serialized weight can be found in `%s`, check the model history"
+msgstr ""
+"Il n’y a pas de poids sérialisés de modèle dans `%s`, veuillez vérifier "
+"l’historique du modèle"
+
+msgid "The model was trained for less than `%s` epochs"
+msgstr "Le modèle a été entrainé sur moins de ‘%s’ époques"
+
+msgid "Mixed multi-outcome type '%s' is not supported"
+msgstr "Le type ‘%s’ n’est pas supporté pour des modèles multi-résultat"
+
+msgid "Unknown outcome type '%s'"
+msgstr "Le type `%s` est inconnu pour une colonne de résultats"
+
+msgid "Outcome is factor and the prediction type is '%s'."
+msgstr ""
+"La colonne de résultats est catégorielle et la prédiction est de type ‘%s’."
+
+msgid "Outcome is numeric and the prediction type is '%s'."
+msgstr ""
+"La colonne de résultats est numérique et la prédiction est de type ‘%s’."
+
+msgid ""
+"The provided hierarchical object is not recognized with a valid format that "
+"can be checked"
+msgstr ""
+"L’objet hiérarchique fourni n’est pas reconnu dans un format valide qui "
+"peut être vérifié"
+
+msgid ""
+"The attributes or colnames in the provided hierarchical object use the "
+"following reserved names : '%s'. Please change those names as they will lead "
+"to unexpected tabnet behavior."
+msgstr ""
+"Les attributs ou noms de colonnes dans l’objet hiérarchique fourni utilisent "
+"les noms réservés suivants : ‘%s’. Veuillez changer ces noms pour éviter un "
+"comportement imprévisible de TabNet."
+
+msgid "`%s` is not a valid loss for outcome of type %s"
+msgstr ""
+"`%s` n’est pas une fonction objectif valide pour les colonnes de résultat de "
+"type %s"
+
+msgid ""
+"%s is not a valid early-stopping metric to monitor with `valid_split` = %s"
+msgstr ""
+"%s n’est pas une métrique valide d’arrêt anticipé avec `valid_split` = %s"
+
+msgid "Currently only the 'adam' optimizer is supported."
+msgstr "Seule la fonction d’optimisation ‘adam’ est supportée pour l’instant."
+
+msgid ""
+"Currently only the 'step' and 'reduce_on_plateau' scheduler are supported."
+msgstr ""
+"Seules les planifications ‘step’ et ‘reduce_on_plateau’ sont supportées pour "
+"l’instant."
+
+msgid "[Epoch %03d] Loss: %3f"
+msgstr ""
+
+msgid "[Epoch %03d] Loss: %3f, Valid loss: %3fs"
+msgstr ""
+
+msgid "Early stopping at epoch %03d"
+msgstr "Arrêt anticipé à l’époque %03d"
+
+msgid ""
+"Computing importances for a dataset with size %s. This can consume too much "
+"memory. We are going to use a sample of size 1e5, You can disable this "
+"message by using the `importance_sample_size` argument."
+msgstr ""
+"Calcul de l’importance sur un jeu de données de taille %s. Il se peut que "
+"cela consomme trop de mémoire. "
Aussi le jeu de donnée sera réduit " +"aléatoirement à une taille de 1e5. Vous pouvez rendre ce message silencieux " +"en configurant l’argument `importance_sample_size`." + +msgid "" +"Computing importances for a dataset with size %s. This can consume too much " +"memory. We are going to use a sample of size 1e5. You can disable this " +"message by using the `importance_sample_size` argument." +msgstr "" +"Calcul de l’importance sur un jeu de données de taille %s. Il se peut que " +"cela consomme trop de mémoire. Aussi le jeu de donnée sera réduit " +"aléatoirement à une taille de 1e5. Vous pouvez rendre ce message silencieux " +"en configurant l’argument `importance_sample_size`." + +msgid "'n_steps' should be a positive integer." +msgstr "’n_steps’ doit être un nombre entier positif." + +msgid "'n_shared' and 'n_independant' can't be both zero." +msgstr "’n_shared’ et ’n_independant’ ne peuvent pas être nuls simultanément." + +msgid "Please choose either 'sparsemax' or 'entmax' as 'mask_type'" +msgstr "Vous devez choisir ‘sparsemax’ ou ‘entmax’ pour ‘mask_type’" diff --git a/tests/testthat/test-hardhat_parameters.R b/tests/testthat/test-hardhat_parameters.R index aba25df4..624df69f 100644 --- a/tests/testthat/test-hardhat_parameters.R +++ b/tests/testthat/test-hardhat_parameters.R @@ -61,7 +61,6 @@ test_that("early stopping works wo validation split", { }) - test_that("configuration with categorical_embedding_dimension vector works", { config <- tabnet_config(cat_emb_dim=c(1,1,2,2,1,1,1,2,1,1,1,2,2,2)) @@ -106,6 +105,12 @@ test_that("reduce_on_plateau scheduler works", { lr_decay = 0.1, step_size = 1) ) + expect_error( + fit <- tabnet_fit(x, y, epochs = 3, lr_scheduler = "multiplicative", + lr_decay = 0.1, step_size = 1), + "only the 'step' and 'reduce_on_plateau' scheduler" + ) + sc_fn <- function(optimizer) { torch::lr_reduce_on_plateau(optimizer, factor = 0.1, patience = 10) } @@ -157,7 +162,7 @@ test_that("fit raise an error with non-supported mask-type", { 
expect_error( tabnet_fit(rec, attrition, epochs = 1, valid_split = 0.25, verbose = TRUE, config = tabnet_config( mask_type="max_entropy")), - regexp = "either sparsemax or entmax" + regexp = "either 'sparsemax' or 'entmax' as" ) }) diff --git a/tests/testthat/test_translations.R b/tests/testthat/test_translations.R new file mode 100644 index 00000000..586c9bbe --- /dev/null +++ b/tests/testthat/test_translations.R @@ -0,0 +1,24 @@ +test_that("early stopping message get translated in french", { + testthat::skip_on_ci() + testthat::skip_on_cran() + withr::with_language(lang = "fr", + expect_error( + tabnet_fit(attrix, attriy, epochs = 200, verbose=TRUE, + early_stopping_monitor="cross_validation_loss", + early_stopping_tolerance=1e-7, early_stopping_patience=3, learn_rate = 0.2), + regexp = "pas une métrique valide d’arrêt anticipé" + ) + ) +}) + +test_that("scheduler message translated in french", { + testthat::skip_on_ci() + testthat::skip_on_cran() + withr::with_language(lang = "fr", + expect_error( + fit <- tabnet_pretrain(x, y, epochs = 3, lr_scheduler = "multiplicative", + lr_decay = 0.1, step_size = 1), + regexp = "planifications ‘step’ et ‘reduce_on_plateau’ sont supportées" + ) + ) +})