diff --git a/R/models.R b/R/models.R
index 3c8368c..a46580c 100644
--- a/R/models.R
+++ b/R/models.R
@@ -291,11 +291,17 @@ fn_ridge = function(list_merged, vec_idx_training, vec_idx_validation, other_par
         glmnet::cv.glmnet(x=X_training, y=y_training, alpha=0, nfolds=other_params$n_folds, parallel=FALSE),
         error = function(e) {NA})
     if (is.na(sol[1])) {
-        return(list(
-            list_perf=NA,
-            df_y_validation=NA,
-            vec_effects=NA,
-            n_non_zero=NA))
+        # return(list(
+        #     list_perf=NA,
+        #     df_y_validation=NA,
+        #     vec_effects=NA,
+        #     n_non_zero=NA))
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_ridge(...). ",
+                "Failed to fit the model."))
+        return(error)
     }
     ### Find the first lambda with the lowest squared error (deviance) while having non-zero SNP effects
     vec_idx_decreasing_deviance = order(sol$glmnet.fit$dev.ratio, decreasing=FALSE)
@@ -444,11 +450,17 @@ fn_lasso = function(list_merged, vec_idx_training, vec_idx_validation, other_par
         glmnet::cv.glmnet(x=X_training, y=y_training, alpha=1, nfolds=other_params$n_folds, parallel=FALSE),
         error = function(e) {NA})
     if (is.na(sol[1])) {
-        return(list(
-            list_perf=NA,
-            df_y_validation=NA,
-            vec_effects=NA,
-            n_non_zero=NA))
+        # return(list(
+        #     list_perf=NA,
+        #     df_y_validation=NA,
+        #     vec_effects=NA,
+        #     n_non_zero=NA))
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_lasso(...). ",
+                "Failed to fit the model."))
+        return(error)
     }
     ### Find the first lambda with the lowest squared error (deviance) while having non-zero SNP effects
     vec_idx_decreasing_deviance = order(sol$glmnet.fit$dev.ratio, decreasing=FALSE)
@@ -597,11 +609,17 @@ fn_elastic_net = function(list_merged, vec_idx_training, vec_idx_validation, oth
         glmnet::cv.glmnet(x=X_training, y=y_training),
         error = function(e) {NA})
     if (is.na(sol[1])) {
-        return(list(
-            list_perf=NA,
-            df_y_validation=NA,
-            vec_effects=NA,
-            n_non_zero=NA))
+        # return(list(
+        #     list_perf=NA,
+        #     df_y_validation=NA,
+        #     vec_effects=NA,
+        #     n_non_zero=NA))
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_elastic_net(...). ",
+                "Failed to fit the model."))
+        return(error)
     }
     ### Find the first lambda with the lowest squared error (deviance) while having non-zero SNP effects
     vec_idx_decreasing_deviance = order(sol$glmnet.fit$dev.ratio, decreasing=FALSE)
@@ -754,7 +772,17 @@ fn_Bayes_A = function(list_merged, vec_idx_training, vec_idx_validation,
     ### Attempt at preventing overwrites to the running Gibbs samplers in parallel
     other_params$out_prefix = gsub(":", ".", gsub(" ", "-", paste(other_params$out_prefix, Sys.time(), stats::runif(1), sep="-")))
     ### Solve via Bayes A (a priori assume heritability at 50%, i.e. R2=0.5 below)
-    sol = BGLR::BGLR(y=yNA, ETA=ETA, R2=0.5, nIter=other_params$nIter, burnIn=other_params$burnIn, saveAt=other_params$out_prefix, verbose=FALSE)
+    sol = tryCatch(
+        BGLR::BGLR(y=yNA, ETA=ETA, R2=0.5, nIter=other_params$nIter, burnIn=other_params$burnIn, saveAt=other_params$out_prefix, verbose=FALSE),
+        error = function(e) {NA})
+    if (is.na(sol[1])) {
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_Bayes_A(...). ",
+                "Failed to fit the model."))
+        return(error)
+    }
     ### Extract effects including the intercept and fixed effects
     if (!is.null(list_merged$COVAR)) {
         b_hat = c(sol$mu, sol$ETA[[2]]$b, sol$ETA$MRK$b)
@@ -901,7 +929,17 @@ fn_Bayes_B = function(list_merged, vec_idx_training, vec_idx_validation,
     ### Attempt at preventing overwrites to the running Gibbs samplers in parallel
     other_params$out_prefix = gsub(":", ".", gsub(" ", "-", paste(other_params$out_prefix, Sys.time(), stats::runif(1), sep="-")))
     ### Solve via Bayes B (a priori assume heritability at 50%, i.e. R2=0.5 below)
-    sol = BGLR::BGLR(y=yNA, ETA=ETA, R2=0.5, nIter=other_params$nIter, burnIn=other_params$burnIn, saveAt=other_params$out_prefix, verbose=FALSE)
+    sol = tryCatch(
+        BGLR::BGLR(y=yNA, ETA=ETA, R2=0.5, nIter=other_params$nIter, burnIn=other_params$burnIn, saveAt=other_params$out_prefix, verbose=FALSE),
+        error = function(e) {NA})
+    if (is.na(sol[1])) {
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_Bayes_B(...). ",
+                "Failed to fit the model."))
+        return(error)
+    }
     ### Extract effects including the intercept and fixed effects
     if (!is.null(list_merged$COVAR)) {
         b_hat = c(sol$mu, sol$ETA[[2]]$b, sol$ETA$MRK$b)
@@ -1048,7 +1086,17 @@ fn_Bayes_C = function(list_merged, vec_idx_training, vec_idx_validation,
     ### Attempt at preventing overwrites to the running Gibbs samplers in parallel
     other_params$out_prefix = gsub(":", ".", gsub(" ", "-", paste(other_params$out_prefix, Sys.time(), stats::runif(1), sep="-")))
     ### Solve via Bayes C (a priori assume heritability at 50%, i.e. R2=0.5 below)
-    sol = BGLR::BGLR(y=yNA, ETA=ETA, R2=0.5, nIter=other_params$nIter, burnIn=other_params$burnIn, saveAt=other_params$out_prefix, verbose=FALSE)
+    sol = tryCatch(
+        BGLR::BGLR(y=yNA, ETA=ETA, R2=0.5, nIter=other_params$nIter, burnIn=other_params$burnIn, saveAt=other_params$out_prefix, verbose=FALSE),
+        error = function(e) {NA})
+    if (is.na(sol[1])) {
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_Bayes_C(...). ",
+                "Failed to fit the model."))
+        return(error)
+    }
     ### Extract effects including the intercept and fixed effects
     if (!is.null(list_merged$COVAR)) {
         b_hat = c(sol$mu, sol$ETA[[2]]$b, sol$ETA$MRK$b)
@@ -1196,9 +1244,22 @@ fn_gBLUP = function(list_merged, vec_idx_training, vec_idx_validation, other_par
             eval(parse(text=paste0("df_training$covariate_", j, " = X[, j]")))
         }
         covariates_string = paste(paste0("covariate_", 1:ncol(X)), collapse="+")
-        eval(parse(text=paste0("mod = sommer::mmer(y ~ 1 + ", covariates_string, ", random= ~vsr(id, Gu=A ), rcov= ~vsr(units), data=df_training, dateWarning=FALSE, verbose=FALSE)")))
+        mod = tryCatch(
+            eval(parse(text=paste0("sommer::mmer(y ~ 1 + ", covariates_string, ", random= ~vsr(id, Gu=A ), rcov= ~vsr(units), data=df_training, dateWarning=FALSE, verbose=FALSE)"))),
+            error=function(e){NA})
     } else {
-        mod = sommer::mmer(y ~ 1, random= ~vsr(id, Gu=A), rcov= ~vsr(units), data=df_training, dateWarning=FALSE, verbose=FALSE)
+        mod = tryCatch(
+            sommer::mmer(y ~ 1, random= ~vsr(id, Gu=A), rcov= ~vsr(units), data=df_training, dateWarning=FALSE, verbose=FALSE),
+            error=function(e){NA})
+    }
+    ### Error catching
+    if (is.na(mod[1])) {
+        error = methods::new("gpError",
+            code=407,
+            message=paste0(
+                "Error in models::fn_gBLUP(...). ",
+                "Failed to fit the model."))
+        return(error)
     }
     ### Extract effects
     b_hat = mod$Beta$Estimate; names(b_hat) = mod$Beta$Effect