diff --git a/DESCRIPTION b/DESCRIPTION index 6cbe66ea..b7369c9a 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: antaresRead Type: Package Title: Import, Manipulate and Explore the Results of an 'Antares' Simulation -Version: 2.7.0 +Version: 2.7.1 Authors@R: c( person("Tatiana", "Vargas", email = "tatiana.vargas@rte-france.com", role = c("aut", "cre")), person("Jalal-Edine", "ZAWAM", role = "aut"), @@ -15,6 +15,7 @@ Authors@R: c( person("Clement", "Berthet", role = "ctb"), person("Kamel", "Kemiha", role = "ctb"), person("Abdallah", "Mahoudi", role = "ctb"), + person("Nicolas", "Boitard", role = "ctb"), person("RTE", role = "cph") ) Description: Import, manipulate and explore results generated by 'Antares', a @@ -40,7 +41,8 @@ Imports: utils, memuse, purrr, - lifecycle + lifecycle, + assertthat Suggests: rhdf5 (>= 2.24.0), testthat, diff --git a/NAMESPACE b/NAMESPACE index a3692c35..deb945ef 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -48,6 +48,7 @@ export(ponderateMcAggregation) export(readAntares) export(readAntaresAreas) export(readAntaresClusters) +export(readAntaresSTClusters) export(readBindingConstraints) export(readClusterDesc) export(readClusterResDesc) @@ -79,6 +80,7 @@ import(doParallel) import(jsonlite) import(parallel) import(plyr) +importFrom(assertthat,assert_that) importFrom(doParallel,registerDoParallel) importFrom(grDevices,col2rgb) importFrom(grDevices,rgb) diff --git a/NEWS.md b/NEWS.md index 30c65bf9..4d5fcb5e 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,5 +1,29 @@ > Copyright © 2016 RTE Réseau de transport d’électricité +# antaresRead 2.7.1 (development) + +NEW FEATURES: + +* `readInputThermal()` : + - new parameter **areas** to get desired clusters from selected areas. + - new parameter **thermalAvailabilities** to import time series. +* `readInputRES()` new parameter **areas** to get desired clusters from selected areas. +* `setSimulationPath()` return a new parameter `binding` (for studies >= v8.7.0). +It contains a table with group dimensions of time series for binding constraints. +* `readAntares()` new parameter **clustersST** to read (output simulation) short-term clusters +* New function `readAntaresSTClusters()` + + +BREAKING CHANGES : + +* `readInputThermal()` / `readInputRES()` default value when no time series in the selected clusters. + +BUGFIXES : + +* `readInputThermal()` return data from file data.txt with `thermalData` parameter +* `setSimulationPath()` has also the parameter **areasWithSTClusters** in 'output' mode + + # antaresRead 2.7.0 ### Breaking changes (Antares v8.7.0) : @@ -24,7 +48,7 @@ Dependencies : -# antaresRead 2.6.2 (development) +# antaresRead 2.6.2 BUGFIXES : * `readIniFile()` : avoid `utils::type.convert` on specific cases (ex : 789e or 123i) @@ -46,6 +70,8 @@ BUGFIXES : BREAKING CHANGES : * `api_get()` has a new parameter to control JSON file parsing +* `readInputThermal()` default value when no time series in the selected clusters. +* `readInputRES()` default value when no time series in the selected clusters * `readClusterDesc()`/ `readClusterRESDesc()` / `readClusterSTDesc()` return empty dataTable and warning if no cluster in Antares study. 
@@ -61,7 +87,7 @@ BREAKING CHANGES (Antares v8.6) : * `readInputTS()` is now compatible to read time series with : - "short-term storage" - "mingen" (pmin hydro value) -* `setSimulationPath()` has new parameter `areasWithSTClusters` (name of area with "st-storage" cluster) +* `setSimulationPath()` has new parameter **areasWithSTClusters** (name of area with "st-storage" cluster) BUGFIXES : diff --git a/R/importInput.R b/R/importInput.R index 9dea2ad0..f0360646 100644 --- a/R/importInput.R +++ b/R/importInput.R @@ -369,8 +369,13 @@ data$area <- area data$cluster <- cl - data <- data[opts$timeIdMin:opts$timeIdMax] - data$timeId <- opts$timeIdMin:opts$timeIdMax + + # index blocks + a <- opts$parameters$general$simulation.start + b <- opts$parameters$general$simulation.end + + data <- data[a:b] + data$timeId <- a:b changeTimeStep(data, timeStep, "daily", fun = "mean") }) diff --git a/R/importOutput.R b/R/importOutput.R index 524782d9..a944e5fd 100644 --- a/R/importOutput.R +++ b/R/importOutput.R @@ -47,6 +47,7 @@ #' - "areas", "values" => areas #' - "areas", "details" => clusters #' - "areas", "details-res" => renewables clusters +#' - "areas", "details-STstorage" => short-term clusters #' - "links", "values" => links #' #' @return @@ -281,30 +282,37 @@ #' .get_value_columns_details_file #' -#' Private function used to get the column names for the details-timeStep.txt or details-res-timeStep.txt. -#' Used in .importOutputForClusters() and .importOutputForResClusters() +#' Private function used to get the column names for the details-timeStep.txt, details-res-timeStep.txt, or details-STstorage-timeStep.txt. +#' Used in .importOutputForClusters(), .importOutputForResClusters(), and .importOutputForSTClusters() #' From the opts, we detect which outputs the user decides to take #' #' @return #' a vector +#' +#' @importFrom assertthat assert_that #' #' @noRd #' .get_value_columns_details_file <- function(opts, type) { - if(type == "details") { - # Order is important. There is a correspondance between elements. - all_thematic_variables <- c("DTG by plant", "NP Cost by plant", "NODU by plant") - colNames <- c("production", "NP Cost", "NODU") - if (opts$antaresVersion >= 830){ - all_thematic_variables <- c(all_thematic_variables, "Profit by plant") - colNames <- c(colNames, "profit") - } - } else if(type == "details-res") { - # Order is important. There is a correspondance between elements. - all_thematic_variables <- c("RES generation by plant") - colNames <- c("production") - } + assert_that(type %in% c("details","details-res","details-STstorage")) + + simulation_variables_names_by_support <- read.table(system.file( + "format_output","simulation_variables_names_by_support.csv",package="antaresRead" + ),sep=";",fileEncoding="UTF-8",header = TRUE) + + filtered_variables_names <- subset(simulation_variables_names_by_support,DETAILS_FILES_TYPE==type) + if (type=="details" && opts$antaresVersion < 830) + filtered_variables_names <- subset(filtered_variables_names,ANTARES_DISPLAYED_NAME!="Profit by plant") + + # Order is important. 
There is a correspondance between elements + ordered_filtered_variables_names <- filtered_variables_names[ + order(filtered_variables_names$ORDINAL_POSITION_BY_TOPIC), + ] + + all_thematic_variables <- ordered_filtered_variables_names$ANTARES_DISPLAYED_NAME + colNames <- ordered_filtered_variables_names$RPACKAGE_DISPLAYED_NAME + # With thematic-trimming enabled if (opts$parameters$general$`thematic-trimming`) { if ("variables selection" %in% names(opts$parameters)) { @@ -346,36 +354,8 @@ .importOutputForClusters <- function(areas, timeStep, select = NULL, mcYears = NULL, showProgress, opts, mustRun = FALSE, parallel) { - # In output files, there is one file per area with the follwing form: - # cluster1-var1 | cluster2-var1 | cluster1-var2 | cluster2-var2 - # the following function reshapes the result to have variable cluster in column. - # To improve greatly the performance we use our knowledge of the position of - # the columns instead of using more general functions like dcast. - reshapeFun <- function(x) { - - # Get cluster names - n <- names(x) - idx <- ! n %in% pkgEnv$idVars - clusterNames <- tolower(unique(n[idx])) - - # Id vars names - idVarsId <- which(!idx) - idVarsNames <- n[idVarsId] - - # Column names of the output table - colNames <- .get_value_columns_details_file(opts, "details") - - # Loop over clusters - nclusters <- length(clusterNames) - - res <- llply(1:nclusters, function(i) { - dt <- x[, c(nclusters * 0:(length(colNames) - 1) + i, idVarsId), with = FALSE] - setnames(dt, c(colNames, idVarsNames)) - dt[, cluster := as.factor(clusterNames[i])] - dt - }) - - rbindlist(res) + reshapeFun <- function(x){ + .reshape_details_file(x,file_type="details",opts=opts) } if (!mustRun) { @@ -490,6 +470,47 @@ } +#' .reshape_details_file +#' +#' In output files, there is one file per area with the follwing form: +#' cluster1-var1 | cluster2-var1 | cluster1-var2 | cluster2-var2 +#' the following function reshapes the result to have variable cluster in column. +#' To improve greatly the performance we use our knowledge of the position of +#' the columns instead of using more general functions like dcast. +#' +#' @return +#' a data.table +#' +#' @noRd +#' +.reshape_details_file <- function(x,file_type,opts) { + + # Get cluster names + n <- names(x) + idx <- ! n %in% pkgEnv$idVars + clusterNames <- tolower(unique(n[idx])) + + # Id vars names + idVarsId <- which(!idx) + idVarsNames <- n[idVarsId] + + # Column names of the output table + colNames <- .get_value_columns_details_file(opts=opts,type=file_type) + + # Loop over clusters + nclusters <- length(clusterNames) + + res <- llply(1:nclusters, function(i) { + dt <- x[, c(nclusters * 0:(length(colNames) - 1) + i, idVarsId), with = FALSE] + setnames(dt, c(colNames, idVarsNames)) + dt[, cluster := as.factor(clusterNames[i])] + dt + }) + + rbindlist(res) +} + + #' .importOutputForResClusters #' #' Private function used to import the output for the renewable clusters of one area @@ -502,38 +523,11 @@ .importOutputForResClusters <- function(areas, timeStep, select = NULL, mcYears = NULL, showProgress, opts, parallel) { - # In output files, there is one file per area with the follwing form: - # cluster1-var1 | cluster2-var1 | cluster1-var2 | cluster2-var2 - # the following function reshapes the result to have variable cluster in column. - # To improve greatly the performance we use our knowledge of the position of - # the columns instead of using more general functions like dcast. 
+ reshapeFun <- function(x) { - - # Get cluster names - n <- names(x) - idx <- ! n %in% pkgEnv$idVars - clusterNames <- tolower(unique(n[idx])) - - # Id vars names - idVarsId <- which(!idx) - idVarsNames <- n[idVarsId] - - # Column names of the output table - colNames <- .get_value_columns_details_file(opts, "details-res") - - # Loop over clusters - nclusters <- length(clusterNames) - - res <- llply(1:nclusters, function(i) { - dt <- x[, c(nclusters * 0:(length(colNames) - 1) + i, idVarsId), with = FALSE] - setnames(dt, c(colNames, idVarsNames)) - dt[, cluster := as.factor(clusterNames[i])] - dt - }) - - rbindlist(res) + .reshape_details_file(x,file_type="details-res",opts=opts) } - + suppressWarnings( .importOutput("areas", "details-res", "area", areas, timeStep, NULL, mcYears, showProgress, opts, reshapeFun, sameNames = FALSE, @@ -542,6 +536,29 @@ } +#' .importOutputForSTClusters +#' +#' Private function used to import the output for the short-term clusters of one area +#' +#' @return +#' a data.table +#' +#' @noRd +#' +.importOutputForSTClusters <- function(areas, timeStep, select = NULL, mcYears = NULL, + showProgress, opts, parallel) { + + reshapeFun <- function(x) { + .reshape_details_file(x,file_type="details-STstorage",opts=opts) + } + + suppressWarnings( + .importOutput("areas", "details-STstorage", "area", areas, timeStep, NULL, + mcYears, showProgress, opts, reshapeFun, sameNames = FALSE, + objectDisplayName = "clustersST", parallel = parallel) + ) +} + #' .importOutputForBindingConstraints #' #' Private function used to import the output for binding constraints. diff --git a/R/readAntares.R b/R/readAntares.R index 4a9ac46f..86ea9683 100644 --- a/R/readAntares.R +++ b/R/readAntares.R @@ -14,6 +14,8 @@ #' Read the data of an Antares simulation #' #' @description +#' `r antaresRead:::badge_api_ok()` +#' #' \code{readAntares} is a swiss-army-knife function used to read almost every #' possible time series of an antares Project at any desired time resolution #' (hourly, daily, weekly, monthly or annual). @@ -85,6 +87,11 @@ #' import results at renewable cluster level. If \code{NULL} no cluster is imported. The #' special value \code{"all"} tells the function to import renewable clusters from all #' areas. +#' @param clustersST +#' Vector containing the name of the areas for which you want to +#' import results at short-term cluster level. If \code{NULL} no cluster is imported. The +#' special value \code{"all"} tells the function to import short-term clusters from all +#' areas. #' @param bindingConstraints #' Should binding constraints be imported (v8.4+)? 
#' @param districts @@ -210,8 +217,8 @@ #' @export #' readAntares <- function(areas = NULL, links = NULL, clusters = NULL, - districts = NULL, clustersRes = NULL, bindingConstraints = FALSE, - misc = FALSE, thermalAvailabilities = FALSE, + districts = NULL, clustersRes = NULL, clustersST = NULL, + bindingConstraints = FALSE, misc = FALSE, thermalAvailabilities = FALSE, hydroStorage = FALSE, hydroStorageMaxPower = FALSE, reserve = FALSE, linkCapacity = FALSE, mustRun = FALSE, thermalModulation = FALSE, @@ -221,7 +228,7 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, mcWeights = NULL, opts = simOptions(), parallel = FALSE, simplify = TRUE, showProgress = TRUE) { - + if((!is.null(opts$parameters$`other preferences`$`renewable-generation-modelling`) && !opts$parameters$`other preferences`$`renewable-generation-modelling` %in% "clusters") || is.null(opts$parameters$`other preferences`$`renewable-generation-modelling`)){ @@ -309,6 +316,7 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, links = links, clusters = clusters, clustersRes = clustersRes, + clustersST = clustersST, districts = districts, mcYears = mcYears) @@ -317,6 +325,7 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, links <- reqInfos$links clusters <- reqInfos$clusters clustersRes <- reqInfos$clustersRes + clustersST <- reqInfos$clustersST districts <- reqInfos$districts mcYears <- reqInfos$mcYears synthesis <- reqInfos$synthesis @@ -328,7 +337,7 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, return(aggregateResult(opts = opts, verbose = showProgress, filtering = TRUE, - selected = list(areas = areas, links = links, clusters = clusters, clustersRes = clustersRes), + selected = list(areas = areas, links = links, clusters = clusters, clustersRes = clustersRes, clustersST = clustersST), timestep = timeStep, writeOutput = FALSE, mcWeights = mcWeights, mcYears = mcYears)) @@ -342,7 +351,7 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, } # If all arguments are NULL, import all areas - if (is.null(areas) & is.null(links) & is.null(clusters) & is.null(clustersRes) & is.null(districts)) { + if (is.null(areas) & is.null(links) & is.null(clusters) & is.null(clustersRes) & is.null(clustersST) & is.null(districts)) { areas <- "all" } @@ -353,6 +362,7 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, links <- .checkArg(links, opts$linkList, "Links %s do not exist in the simulation.") clusters <- .checkArg(clusters, opts$areasWithClusters, "Areas %s do not exist in the simulation or do not have any thermal cluster.") clustersRes <- .checkArg(clustersRes, opts$areasWithResClusters, "Areas %s do not exist in the simulation or do not have any renewable cluster.") + clustersST <- .checkArg(clustersST, opts$areasWithSTClusters, "Areas %s do not exist in the simulation or do not have any short-term cluster.") districts <- .checkArg(districts, opts$districtList, "Districts %s do not exist in the simulation.") mcYears <- .checkArg(mcYears, opts$mcYears, "Monte-Carlo years %s have not been exported.", allowDup = TRUE) @@ -476,6 +486,12 @@ readAntares <- function(areas = NULL, links = NULL, clusters = NULL, opts, parallel = parallel) if(!is.null(res$clustersRes) && nrow(res$clustersRes) == 0) res$clustersRes <- NULL + # Import short-term clusters + res$clustersST <- .importOutputForSTClusters(clustersST, timeStep, NULL, + mcYears, showProgress, + opts, parallel = parallel) + if(!is.null(res$clustersST) && 
nrow(res$clustersST) == 0) res$clustersST <- NULL + # Import thermal clusters and eventually must run if (!mustRun) { res$clusters <- .importOutputForClusters(clusters, timeStep, NULL, mcYears, @@ -820,6 +836,7 @@ readAntaresAreas <- function(areas, links = TRUE, clusters = TRUE, clustersRes = links, clusters, clustersRes, + clustersST, districts, mcYears){ @@ -873,10 +890,14 @@ readAntaresAreas <- function(areas, links = TRUE, clusters = TRUE, clustersRes = if (!is.null(areas)) clustersRes <- areas else clustersRes <- "all" } + if ("clustersST" %in% unlist(select) & is.null(clustersST)) { + if (!is.null(areas)) clustersST <- areas + else clustersST <- "all" + } if ("mcYears" %in% unlist(select) & is.null(mcYears)) mcYears <- "all" # If all arguments are NULL, import all areas - if (is.null(areas) & is.null(links) & is.null(clusters) & is.null(districts)) { + if (is.null(areas) & is.null(links) & is.null(clusters) & is.null(districts) & is.null(clustersST)) { areas <- "all" } @@ -888,6 +909,7 @@ readAntaresAreas <- function(areas, links = TRUE, clusters = TRUE, clustersRes = links = links, clusters = clusters, clustersRes = clustersRes, + clustersST = clustersST, districts = districts, mcYears = mcYears, synthesis = synthesis, diff --git a/R/readAntaresClusters.R b/R/readAntaresClusters.R index 9717d70a..bb0ebc3f 100644 --- a/R/readAntaresClusters.R +++ b/R/readAntaresClusters.R @@ -36,4 +36,56 @@ readAntaresClusters <- function(clusters, selected = c("production", "NP Cost", subset(res, cluster %in% clusters, select = c(setdiff(colnames(res),c("production", "NP Cost", "NODU", "profit")), intersect(colnames(res),selected))) #support for up to v8.4 -} \ No newline at end of file +} + + +#' Read output for a list of short-term storage clusters +#' +#' @param clustersST vector of short-term storage clusters to be imported +#' @param selected vector of thematic trimming +#' @inheritParams readAntares +#' +#' @return data.table of results for short-term storage clusters +#' +#' @export +readAntaresSTClusters <- function(clustersST, selected = c("P.injection", "levels", "P.withdrawal"), + timeStep = c("hourly", "daily", "weekly", "monthly", "annual"), + opts = simOptions(), parallel = FALSE, showProgress = TRUE) { + + if (missing(clustersST)) { + stop("The function 'readAntaresSTClusters' expects a vector of short-term storage clusters names as argument.") + } + if ("Input" %in% opts$mode) { + stop("Cannot use 'readAntaresSTClusters' in 'Input' mode.") + } + if (opts$antaresVersion < 860) { + stop("Cannot use 'readAntaresSTClusters' for a study version < 860.") + } + + ##Add check control for all + allSTClusters <- readClusterSTDesc(opts = opts)[, c("area","cluster")] + allSTClusters$lower_cluster <- tolower(allSTClusters$cluster) + ind_cluster <- which(allSTClusters$lower_cluster %in% .checkArg(tolower(clustersST), + tolower(unique(allSTClusters$cluster)), + "short-term storage clusters %s do not exist in the simulation.")) + clustersST <- allSTClusters$cluster[ind_cluster] + + ind_cluster <- which(allSTClusters$lower_cluster %in% .checkArg(tolower(clustersST), + tolower(unique(allSTClusters[area %in% opts$areasWithSTClusters]$cluster)), + "short-term storage clusters %s have no output.")) + clustersST <- unique(allSTClusters$cluster[ind_cluster]) + + output_st_clusters <- data.table() + if (length(clustersST) > 0) { + areas <- unique(allSTClusters[cluster %in% clustersST]$area) + + res <- readAntares(clustersST = areas, timeStep = timeStep, opts = opts, + parallel = parallel, showProgress = 
showProgress) + + output_st_clusters <- subset(res, cluster %in% clustersST, select = c(setdiff(colnames(res),c("P.injection", "levels", "P.withdrawal")), + intersect(colnames(res),selected)) + ) + } + + return(output_st_clusters) +} diff --git a/R/readInputClusters.R b/R/readInputClusters.R index 61dc812d..185f0fc3 100644 --- a/R/readInputClusters.R +++ b/R/readInputClusters.R @@ -7,7 +7,9 @@ #' project. But contrary to \code{\link{readAntares}}, it only reads time series #' stored in the input folder, so it can work in "input" mode. #' +#' @param areas vector of areas names for which thermal time series must be read. #' @param clusters vector of clusters names for which thermal time series must be read. +#' @param thermalAvailabilities if TRUE, return thermalAvailabilities data #' @param thermalModulation if TRUE, return thermalModulation data #' @param thermalData if TRUE, return thermalData from prepro #' @inheritParams readAntares @@ -27,13 +29,24 @@ #' \code{\link{getAreas}}, \code{\link{getLinks}} #' #' @export -readInputThermal <- function(clusters = NULL, thermalModulation = FALSE, thermalData = FALSE, +readInputThermal <- function(areas = "all", + clusters, + thermalAvailabilities = TRUE, + thermalModulation = FALSE, + thermalData = FALSE, opts = simOptions(), timeStep = c("hourly", "daily", "weekly", "monthly", "annual"), - simplify = TRUE, parallel = FALSE, + simplify = TRUE, + parallel = FALSE, showProgress = TRUE) { + if(!any(thermalAvailabilities, thermalModulation, thermalData)){ + stop("At least one type of data should be selected") + } + timeStep <- match.arg(timeStep) + areas <- tolower(unique(areas)) + clusters <- tolower(unique(clusters)) # Can the importation be parallelized ? if (parallel) { @@ -41,46 +54,73 @@ readInputThermal <- function(clusters = NULL, thermalModulation = FALSE, thermal if (!foreach::getDoParRegistered()) stop("Parallelized importation impossible. Please register a parallel backend, for instance with function 'registerDoParallel'") } - allAreasClusters <- readClusterDesc(opts = opts)[area %in% opts$areasWithClusters, c("area", "cluster")] - allClusters <- unique(allAreasClusters$cluster) - # Manage special value "all" - if(identical(clusters, "all")) clusters <- allClusters - - if (length(setdiff(tolower(clusters), tolower(allClusters))) > 0){ - cat(c("the following clusters are not available : ",setdiff(tolower(clusters), tolower(allClusters)))) - stop("Some clusters are not available in the areas specified") + allAreasClusters <- readClusterDesc(opts = opts)[, c("area", "cluster")] + + #To compare with area and cluster selected + allAreasClusters$lower_area <- tolower(allAreasClusters$area) + allAreasClusters$lower_cluster <- tolower(allAreasClusters$cluster) + + if (identical(areas, "all")) { + areas <- allAreasClusters$area + }else{ + # Check for unavailable areas + diff_areas <- setdiff(areas, allAreasClusters$lower_area) + if (length(diff_areas) > 0) { + stop(paste0("the following areas are not available:", diff_areas)) + } + } + # All areas selected with corresponding clusters + allAreasClusters_filtered_area <- allAreasClusters[area %in% areas] + + if (identical(clusters, "all")) { + clusters <- allAreasClusters_filtered_area$cluster + }else{ + # Check for unavailable clusters + diff_clusters <- setdiff(clusters, allAreasClusters_filtered_area$lower_cluster) + if (length(diff_clusters) > 0) { + stop(paste0("the following clusters are not available:", diff_clusters)) + } } + # Couple areas/clusters of interest. 
+ allAreasClusters_filtered <- allAreasClusters_filtered_area[cluster %in% clusters] + + # To loop + clusters <- unique(allAreasClusters_filtered$cluster) - ind_cluster <- which(tolower(allClusters) %in% tolower(clusters)) - clusters <- unique(allClusters[ind_cluster]) res <- list() # Object the function will return - thermalTS <- as.data.table(ldply(clusters, function(cl) { + # ThermalAvailabilities processing (/series) + if (thermalAvailabilities){ + thermalTS <- as.data.table(ldply(clusters, function(cl) { + areas <- allAreasClusters_filtered[cluster == cl]$area + resCl <- ldply(areas, function(x){ + filePattern <- sprintf("%s/%s/%%s/series.txt", "thermal/series", x) + mid <- .importInputTS(cl, timeStep, opts, filePattern, "ThermalAvailabilities", + inputTimeStep = "hourly", type = "matrix") + + if (is.null(mid)){ + nb_rows_ts <- opts$timeIdMax + timeId_value <- seq(1,nb_rows_ts) + tsId_value <- replicate(nb_rows_ts,1) + ThermalAvailabilities_value <- replicate(nb_rows_ts,0) + mid <- data.table("timeId" = timeId_value, "tsId" = tsId_value, "ThermalAvailabilities" = ThermalAvailabilities_value) + } + mid$area <- x + mid$cluster <- cl + mid + }) + resCl <- dcast(as.data.table(resCl), area + cluster + timeId ~ tsId, value.var = "ThermalAvailabilities") + })) - area <- unique(allAreasClusters[cluster == cl]$area) - if (length(area) > 1) warning(cl," is in more than one area") - resCl <- ldply(area, function(x){ - filePattern <- sprintf("%s/%s/%%s/series.txt", "thermal/series", x) - mid <- .importInputTS(cl, timeStep, opts, filePattern, "ThermalAvailabilities", - inputTimeStep = "hourly", type = "matrix") - if (is.null(mid)) return (data.table()) - mid$area <- x - mid$cluster <- cl - mid - }) + tsCols <- setdiff(colnames(thermalTS), c("area", "cluster", "timeId")) + setnames(thermalTS, tsCols, paste0("ts",tsCols)) + setcolorder(thermalTS, c("area", "cluster", "timeId", setdiff(names(thermalTS), c("area", "cluster", "timeId")))) - resCl <- dcast(as.data.table(resCl), area + cluster + timeId ~ tsId, value.var = "ThermalAvailabilities") - })) - - tsCols <- setdiff(colnames(thermalTS), c("area", "cluster", "timeId")) - setnames(thermalTS, tsCols, paste0("ts",tsCols)) - setcolorder(thermalTS, c("area", "cluster", "timeId", setdiff(names(thermalTS), c("area", "cluster", "timeId")))) - - if (nrow(thermalTS) > 0) res$thermalAvailabilities <- thermalTS + if (nrow(thermalTS) > 0) res$thermalAvailabilities <- thermalTS + } - # thermalModulation processing + # thermalModulation processing (/prepro/.../.../modulation.txt) if (thermalModulation){ - areas <- unique(allAreasClusters[cluster %in% clusters]$area) thermalMod <- as.data.table(ldply(areas, .importThermalModulation, opts = opts, timeStep = timeStep)) thermalMod <- thermalMod[cluster %in% clusters] setcolorder(thermalMod, c("area", "cluster", "timeId", setdiff(names(thermalMod), c("area", "cluster", "timeId")))) @@ -88,21 +128,21 @@ readInputThermal <- function(clusters = NULL, thermalModulation = FALSE, thermal if (nrow(thermalMod) > 0) res$thermalModulation <- thermalMod } - # thermalData processing + # thermalData processing (/prepro/.../.../data.txt) if (thermalData){ - areas <- unique(allAreasClusters[cluster %in% clusters]$area) thermalDat <- as.data.table(ldply(areas, .importThermalData, opts = opts, timeStep = timeStep)) thermalDat <- thermalDat[cluster %in% clusters] setcolorder(thermalDat, c("area", "cluster", "timeId", setdiff(names(thermalDat), c("area", "cluster", "timeId")))) if (nrow(thermalDat) > 0) res$thermalData <- thermalDat } 
- + if (length(res) == 0) stop("At least one argument of readInputTS has to be defined.") # Class and attributes res <- .addClassAndAttributes(res, NULL, timeStep, opts, simplify) addDateTimeColumns(res) + } @@ -115,6 +155,7 @@ readInputThermal <- function(clusters = NULL, thermalModulation = FALSE, thermal #' project. But contrary to \code{\link{readAntares}}, it only reads time series #' stored in the input folder, so it can work in "input" mode. #' +#' @param areas vector of RES areas names for which renewable time series must be read. #' @param clusters vector of RES clusters names for which renewable time series must be read. #' @inheritParams readAntares #' @@ -126,12 +167,17 @@ readInputThermal <- function(clusters = NULL, thermalModulation = FALSE, thermal #' \code{\link{getAreas}}, \code{\link{getLinks}} #' #' @export -readInputRES <- function(clusters = NULL, opts = simOptions(), +readInputRES <- function(areas = "all", + clusters, + opts = simOptions(), timeStep = c("hourly", "daily", "weekly", "monthly", "annual"), - simplify = TRUE, parallel = FALSE, + simplify = TRUE, + parallel = FALSE, showProgress = TRUE) { timeStep <- match.arg(timeStep) + areas <- tolower(unique(areas)) + clusters <- tolower(unique(clusters)) # Can the importation be parallelized ? if (parallel) { @@ -140,28 +186,48 @@ readInputRES <- function(clusters = NULL, opts = simOptions(), } allAreasClusters <- readClusterResDesc(opts = opts)[area %in% opts$areasWithResClusters, c("area", "cluster")] - allClusters <- unique(allAreasClusters$cluster) - # Manage special value "all" - if(identical(clusters, "all")) clusters <- allClusters - - if (length(setdiff(tolower(clusters), tolower(allClusters))) > 0){ - cat(c("the following clusters are not available : ",setdiff(tolower(clusters), tolower(allClusters)))) - stop("Some clusters are not available in the areas specified") + allAreasClusters$lower_area <- tolower(allAreasClusters$area) + allAreasClusters$lower_cluster <- tolower(allAreasClusters$cluster) + + if (identical(areas, "all")) { + areas <- allAreasClusters$area + }else{ + # Check for unavailable areas + diff_areas <- setdiff(areas, allAreasClusters$lower_area) + if (length(diff_areas) > 0) { + stop(paste0("the following areas are not available:", diff_areas)) + } + } + allAreasClusters_filtered_area <- allAreasClusters[area %in% areas] + + if (identical(clusters, "all")) { + clusters <- allAreasClusters_filtered_area$cluster + }else{ + # Check for unavailable clusters + diff_clusters <- setdiff(clusters, allAreasClusters_filtered_area$lower_cluster) + if (length(diff_clusters) > 0) { + stop(paste0("the following clusters are not available:", diff_clusters)) + } } + allAreasClusters_filtered <- allAreasClusters_filtered_area[cluster %in% clusters] + clusters <- unique(allAreasClusters_filtered$cluster) - ind_cluster <- which(tolower(allClusters) %in% tolower(clusters)) - clusters <- unique(allClusters[ind_cluster]) res <- list() # Object the function will return ResTS <- as.data.table(ldply(clusters, function(cl) { - area <- unique(allAreasClusters[cluster == cl]$area) - if (length(area) > 1) warning(cl," is in more than one area") - resCl <- ldply(area, function(x){ + areas <- allAreasClusters_filtered[cluster == cl]$area + resCl <- ldply(areas, function(x){ filePattern <- sprintf("%s/%s/%%s/series.txt", "renewables/series", x) mid <- .importInputTS(cl, timeStep, opts, filePattern, "production", inputTimeStep = "hourly", type = "matrix") - if (is.null(mid)) return (data.table()) + if (is.null(mid)){ + 
nb_rows_ts <- opts$timeIdMax + timeId_value <- seq(1,nb_rows_ts) + tsId_value <- replicate(nb_rows_ts,1) + production_value <- replicate(nb_rows_ts,0) + mid <- data.table("timeId" = timeId_value, "tsId" = tsId_value, "production" = production_value) + } mid$area <- x mid$cluster <- cl mid diff --git a/R/setSimulationPath.R b/R/setSimulationPath.R index 5df538e3..52692979 100644 --- a/R/setSimulationPath.R +++ b/R/setSimulationPath.R @@ -56,8 +56,11 @@ #' \item{districtList}{Vector of the available districts.} #' \item{linkList}{Vector of the available links.} #' \item{areasWithClusters}{Vector of areas containing clusters.} +#' \item{areasWithResClusters}{Vector of areas containing clusters renewable.} +#' \item{areasWithSTClusters}{Vector of areas containing clusters storage (>=v8.6.0).} #' \item{variables}{Available variables for areas, districts and links.} #' \item{parameters}{Other parameters of the simulation.} +#' \item{binding}{Table of time series dimensions for each group (>=v8.7.0).} #' \item{timeIdMin}{ #' Minimum time id of the simulation. It is generally equal to one but can #' be higher if working on a subperiod. @@ -178,7 +181,7 @@ #' #' @rdname setSimulationPath setSimulationPath <- function(path, simulation = NULL) { - + if (missing(path)) { if (exists("choose.dir", getNamespace("utils"))) { # choose.dir is defined only on Windows @@ -219,6 +222,8 @@ setSimulationPath <- function(path, simulation = NULL) { # the simulation folder. if (is.null(res$simPath)) { res <- append(res, .getInputOptions(res)) + if(res$antaresVersion>=870) + res <- append(res, .getDimBCGroups(res)) } else { res <- append(res, .getSimOptions(res)) } @@ -477,6 +482,20 @@ setSimulationPath <- function(path, simulation = NULL) { areasWithResClusters <- sort(union(areaList_mc_all[hasResClusters_mc_all], areaList_mc_ind[hasResClusters_mc_ind])) + + # Areas containing short-term clusters + hasSTClusters_mc_all <- laply(file.path(dataPath_mc_all, "areas", areaList_mc_all), function(x) { + f <- list.files(x) + any(grepl("details-STstorage-", f)) + }) + hasSTClusters_mc_ind <- laply(file.path(dataPath_mc_ind, "areas", areaList_mc_ind), function(x) { + f <- list.files(x) + any(grepl("details-STstorage-", f)) + }) + + areasWithSTClusters <- sort(union(areaList_mc_all[hasSTClusters_mc_all], + areaList_mc_ind[hasSTClusters_mc_ind])) + # Available variables variables <- list() @@ -518,6 +537,7 @@ setSimulationPath <- function(path, simulation = NULL) { linksDef = linksDef, areasWithClusters = areasWithClusters, areasWithResClusters = areasWithResClusters, + areasWithSTClusters = areasWithSTClusters, variables = variables, parameters = params ) @@ -628,3 +648,60 @@ setSimulationPath <- function(path, simulation = NULL) { data.table(link = character(), from = character(), to = character()) } } + +# >= v8.7.0 to have dimension of TS for binding constraints +.getDimBCGroups <- function(list_options){ + # list files + bc_path <- file.path(list_options$inputPath, "bindingconstraints") + bc_all_files <- list.files(bc_path, full.names = TRUE) + vector_size <- file.size(bc_all_files) + + # return NULL if no BC + if(sum(vector_size)==0) + return(NULL) + else{ + # return NULL if no .txt files (no values) + search_values <- grepl(x = bc_all_files, pattern = ".txt") + if(!any(search_values)) + return(NULL) + + # keep only values size >0 + bc_name_values_files <- gsub('(.*)_.*', + '\\1', + grep(x = list.files(bc_path), + pattern = ".txt", + value = TRUE)) + + df_info_files <- data.table(path = bc_all_files[search_values], + 
size = vector_size[search_values], + bc_name = bc_name_values_files) + df_info_files <- df_info_files[size>0,] + + # extract name + group from .ini properties + properties_group <- readIniFile(file = bc_all_files[!search_values]) + + df_groups <- do.call("rbind", + lapply(properties_group, function(x){ + data.table(x$id, + x$group) + })) + names(df_groups)<-c("bc_name", "name_group") + + # merge information + df_groups <- merge(df_info_files, df_groups) + + # read + dim values files + res <- sapply(df_groups$path, function(x){ + file <- data.table::fread(file = x) + dim(file)[2] + }) + + df_groups$dim <- res + + # filter df with only one group with dim > 1 + df_groups <- unique(df_groups[, c("name_group", "dim")]) + df_groups <- df_groups[dim>1] + + return(list(binding = df_groups)) + } +} diff --git a/R/utils_api.R b/R/utils_api.R index 40284089..562e31cd 100644 --- a/R/utils_api.R +++ b/R/utils_api.R @@ -162,6 +162,15 @@ read_secure_json <- function(url, token = NULL, timeout = 60, config = list()) { ) areasWithResClusters <- names(hasResClusters)[hasResClusters] + + hasSTClusters <- unlist( + lapply( + read_secure_json(file.path(dataPath, "areas&depth=2"), ...), + function(x) any(grepl("details-STstorage-", names(x))) + ) + ) + + areasWithSTClusters <- names(hasSTClusters)[hasSTClusters] # Available variables variables <- list() @@ -212,6 +221,7 @@ read_secure_json <- function(url, token = NULL, timeout = 60, config = list()) { linksDef = linksDef, areasWithClusters = intersect(areasWithClusters, areaList), areasWithResClusters = intersect(areasWithResClusters, areaList), + areasWithSTClusters = intersect(areasWithSTClusters, areaList), variables = variables, parameters = params ) diff --git a/R/zzz.R b/R/zzz.R index 0c290891..8efef0a5 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -109,7 +109,8 @@ utils::globalVariables( "thermalPmin", "name", "value", "Folder", "Mode", "Stats", "Name", "progNam", "mrgprice", "isLOLD_cum", "...To", "upstream", "downstream", "LOLD", "LOLD_data", "LOLP", "warn_for_status", - "MRG. PRICE", "H. LEV", "V2", "V1") + "MRG. PRICE", "H. 
LEV", "V2", "V1", "size", "ORDINAL_POSITION_BY_TOPIC", + "DETAILS_FILES_TYPE","ANTARES_DISPLAYED_NAME") ) ## INPUT Properties REF ---- diff --git a/inst/format_output/simulation_variables_names_by_support.csv b/inst/format_output/simulation_variables_names_by_support.csv new file mode 100644 index 00000000..8e7dc32d --- /dev/null +++ b/inst/format_output/simulation_variables_names_by_support.csv @@ -0,0 +1,9 @@ +"TOPIC";"DETAILS_FILES_TYPE";"ANTARES_DISPLAYED_NAME";"ORDINAL_POSITION_BY_TOPIC";"TITLE";"ALIAS";"MIN_VERSION";"OUTPUT_DISPLAYED_NAME";"RPACKAGE_DISPLAYED_NAME" +"Generation / Thermal";"details";"DTG by plant";1;"Dispatchable Thermal Generation by Thermal Cluster (MWh)";"dtgByPlant";;"MWh";"production" +"Generation / Thermal";"details";"NODU by plant";3;"Number of Dispatched Units by Thermal Cluster";"noduByPlant";;"NODU";"NODU" +"Generation / Thermal";"details";"NP Cost by plant";2;"Non-Proportional Costs by Thermal Cluster (€)";"npCostByPlant";;"NP Cost - Euro";"NP Cost" +"Generation / Thermal";"details";"Profit by plant";4;"Net Profit by Thermal Cluster (€)";"profitByPlant";830;"Profit - Euro";"profit" +"Generation / Renewables";"details-res";"RES generation by plant";1;"Renewable Energy Generation by Power Plant Cluster (MWh)";"resGenerationByPlant";810;"MWh";"production" +"Generation / Short-Term Storages";"details-STstorage";"STS inj by plant";1;"Short-Term Storage Injection by Power Plant (MWh)";"stsInjByPlant";860;"P-injection - MW";"P.injection" +"Generation / Short-Term Storages";"details-STstorage";"STS lvl by plant";2;"Short-Term Storage Level by Power Plant (MWh)";"stsLvlByPlant";860;"Levels - MWh";"levels" +"Generation / Short-Term Storages";"details-STstorage";"STS withdrawal by plant";3;"Short-Term Storage Withdrawal by Power Plant (MWh)";"stsWithdrawalByPlant";860;"P-withdrawal - MW";"P.withdrawal" diff --git a/man/antaresRead-package.Rd b/man/antaresRead-package.Rd index 6e14dd10..79f26bc3 100644 --- a/man/antaresRead-package.Rd +++ b/man/antaresRead-package.Rd @@ -37,6 +37,7 @@ Other contributors: \item Clement Berthet [contributor] \item Kamel Kemiha [contributor] \item Abdallah Mahoudi [contributor] + \item Nicolas Boitard [contributor] \item RTE [copyright holder] } diff --git a/man/readAntares.Rd b/man/readAntares.Rd index 78019479..2e390585 100644 --- a/man/readAntares.Rd +++ b/man/readAntares.Rd @@ -10,6 +10,7 @@ readAntares( clusters = NULL, districts = NULL, clustersRes = NULL, + clustersST = NULL, bindingConstraints = FALSE, misc = FALSE, thermalAvailabilities = FALSE, @@ -53,6 +54,11 @@ import results at renewable cluster level. If \code{NULL} no cluster is imported special value \code{"all"} tells the function to import renewable clusters from all areas.} +\item{clustersST}{Vector containing the name of the areas for which you want to +import results at short-term cluster level. If \code{NULL} no cluster is imported. 
The +special value \code{"all"} tells the function to import short-term clusters from all +areas.} + \item{bindingConstraints}{Should binding constraints be imported (v8.4+)?} \item{misc}{Vector containing the name of the areas for which you want to @@ -131,6 +137,8 @@ data.tables, each element representing one type of element (areas, links, clusters) } \description{ +\ifelse{html}{\figure{badge_api_ok.svg}{options: alt='Antares API OK'}}{Antares API: \strong{OK}} + \code{readAntares} is a swiss-army-knife function used to read almost every possible time series of an antares Project at any desired time resolution (hourly, daily, weekly, monthly or annual). diff --git a/man/readAntaresSTClusters.Rd b/man/readAntaresSTClusters.Rd new file mode 100644 index 00000000..1ca29727 --- /dev/null +++ b/man/readAntaresSTClusters.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/readAntaresClusters.R +\name{readAntaresSTClusters} +\alias{readAntaresSTClusters} +\title{Read output for a list of short-term storage clusters} +\usage{ +readAntaresSTClusters( + clustersST, + selected = c("P.injection", "levels", "P.withdrawal"), + timeStep = c("hourly", "daily", "weekly", "monthly", "annual"), + opts = simOptions(), + parallel = FALSE, + showProgress = TRUE +) +} +\arguments{ +\item{clustersST}{vector of short-term storage clusters to be imported} + +\item{selected}{vector of thematic trimming} + +\item{timeStep}{Resolution of the data to import: hourly (default), daily, +weekly, monthly or annual.} + +\item{opts}{list of simulation parameters returned by the function +\code{\link{setSimulationPath}}} + +\item{parallel}{Should the importation be parallelized ? (See details)} + +\item{showProgress}{If TRUE the function displays information about the progress of the +importation.} +} +\value{ +data.table of results for short-term storage clusters +} +\description{ +Read output for a list of short-term storage clusters +} diff --git a/man/readInputRES.Rd b/man/readInputRES.Rd index a3176644..0261a942 100644 --- a/man/readInputRES.Rd +++ b/man/readInputRES.Rd @@ -5,7 +5,8 @@ \title{Read Input RES time series} \usage{ readInputRES( - clusters = NULL, + areas = "all", + clusters, opts = simOptions(), timeStep = c("hourly", "daily", "weekly", "monthly", "annual"), simplify = TRUE, @@ -14,6 +15,8 @@ readInputRES( ) } \arguments{ +\item{areas}{vector of RES areas names for which renewable time series must be read.} + \item{clusters}{vector of RES clusters names for which renewable time series must be read.} \item{opts}{list of simulation parameters returned by the function diff --git a/man/readInputThermal.Rd b/man/readInputThermal.Rd index b59424e6..b2eac352 100644 --- a/man/readInputThermal.Rd +++ b/man/readInputThermal.Rd @@ -5,7 +5,9 @@ \title{Read Input thermal time series} \usage{ readInputThermal( - clusters = NULL, + areas = "all", + clusters, + thermalAvailabilities = TRUE, thermalModulation = FALSE, thermalData = FALSE, opts = simOptions(), @@ -16,8 +18,12 @@ readInputThermal( ) } \arguments{ +\item{areas}{vector of areas names for which thermal time series must be read.} + \item{clusters}{vector of clusters names for which thermal time series must be read.} +\item{thermalAvailabilities}{if TRUE, return thermalAvailabilities data} + \item{thermalModulation}{if TRUE, return thermalModulation data} \item{thermalData}{if TRUE, return thermalData from prepro} diff --git a/man/setSimulationPath.Rd b/man/setSimulationPath.Rd index 2045eead..f085a136 100644 --- 
a/man/setSimulationPath.Rd +++ b/man/setSimulationPath.Rd @@ -62,8 +62,11 @@ each Monte-Carlo simulation.} \item{districtList}{Vector of the available districts.} \item{linkList}{Vector of the available links.} \item{areasWithClusters}{Vector of areas containing clusters.} +\item{areasWithResClusters}{Vector of areas containing clusters renewable.} +\item{areasWithSTClusters}{Vector of areas containing clusters storage (>=v8.6.0).} \item{variables}{Available variables for areas, districts and links.} \item{parameters}{Other parameters of the simulation.} +\item{binding}{Table of time series dimensions for each group (>=v8.7.0).} \item{timeIdMin}{ Minimum time id of the simulation. It is generally equal to one but can be higher if working on a subperiod. diff --git a/revdep/README.md b/revdep/README.md index 16a7e2e3..25ad2f71 100644 --- a/revdep/README.md +++ b/revdep/README.md @@ -10,7 +10,7 @@ |collate |French_France.utf8 | |ctype |French_France.utf8 | |tz |Europe/Paris | -|date |2024-05-27 | +|date |2024-06-13 | |rstudio |2023.12.0+369 Ocean Storm (desktop) | |pandoc |NA | @@ -18,10 +18,11 @@ |package |old |new |Δ | |:-----------|:-----|:-----|:--| -|antaresRead |2.6.1 |2.7.0 |* | +|antaresRead |2.7.0 |2.7.1 |* | |cachem |NA |1.0.8 |* | |fastmap |NA |1.1.1 |* | |openssl |NA |2.1.2 |* | +|rlang |NA |1.1.3 |* | |stringi |NA |1.8.3 |* | # Revdeps diff --git a/tests/testthat/test-importOutputForClusters.R b/tests/testthat/test-importOutputForClusters.R new file mode 100644 index 00000000..c39f574d --- /dev/null +++ b/tests/testthat/test-importOutputForClusters.R @@ -0,0 +1,62 @@ +#Copyright © 2016 RTE Réseau de transport d’électricité + +context("Functions .importOutput") + +path_study_test <- grep(pattern = "87", x = studyPathSV8, value = TRUE) + +opts <- setSimulationPath(path_study_test,simulation="20240105-0934eco") + +test_that(".importOutputForClusters is ok", { + + OutputForClusters <- .importOutputForClusters( + areas="fr", + timeStep="annual", + showProgress=FALSE, + parallel=FALSE, + opts=opts + ) + + required_order_simulation_variables <- c("production","NP Cost","NODU","profit") + + order_simulation_variables <- colnames(OutputForClusters)[colnames(OutputForClusters) %in% required_order_simulation_variables] + + expect_equal(order_simulation_variables,required_order_simulation_variables) + expect_equal(nrow(OutputForClusters),1) +}) + + +test_that(".importOutputForResClusters is ok", { + + OutputForResClusters <- .importOutputForResClusters( + areas="fr", + timeStep="annual", + showProgress=FALSE, + parallel=FALSE, + opts=opts + ) + + required_order_simulation_variables <- c("production") + + order_simulation_variables <- colnames(OutputForResClusters)[colnames(OutputForResClusters) %in% required_order_simulation_variables] + + expect_equal(order_simulation_variables,required_order_simulation_variables) + expect_equal(nrow(OutputForResClusters),1) +}) + +test_that(".importOutputForSTClusters is ok", { + + OutputForSTClusters <- .importOutputForSTClusters( + areas="fr", + timeStep="annual", + showProgress=FALSE, + parallel=FALSE, + opts=opts + ) + + required_order_simulation_variables <- c("P.injection","levels","P.withdrawal") + + order_simulation_variables <- colnames(OutputForSTClusters)[colnames(OutputForSTClusters) %in% required_order_simulation_variables] + + expect_equal(order_simulation_variables,required_order_simulation_variables) + expect_equal(nrow(OutputForSTClusters),1) +}) diff --git a/tests/testthat/test-readAntares_STclusters.R 
b/tests/testthat/test-readAntares_STclusters.R new file mode 100644 index 00000000..d6789105 --- /dev/null +++ b/tests/testthat/test-readAntares_STclusters.R @@ -0,0 +1,53 @@ +#Copyright © 2016 RTE Réseau de transport d’électricité + +context("Function readAntares (ST clusters)") + +path_study_test <- grep(pattern = "87", x = studyPathSV8, value = TRUE) + +suppressWarnings(opts <- setSimulationPath(path_study_test, simulation = "20240105-0934eco")) + +test_that("ST clusters importation is ok", { + + clustersST <- readAntares(clustersST = "all", timeStep = "annual",opts = opts) + expect_true(all(opts$areasWithSTClusters %in% clustersST$area)) + + clustersST_fr <- readAntares(clustersST = "fr", timeStep = "annual", opts = opts) + expect_true("fr" == unique(clustersST_fr$area)) +}) + + +test_that("ST clusters importation is OK", { + nweeks_study <- 52 + output_cols <- c("P.injection", "levels", "P.withdrawal") + clusters <- readAntaresSTClusters(clusters = "fr_st_other1", selected = output_cols[1:2], timeStep = "hourly", showProgress = FALSE, opts = opts) + expect_is(clusters, "data.table") + expect_true(!is.null(clusters$cluster)) + expect_equal(nrow(clusters), 24 * 7 * nweeks_study) + expect_true(all(output_cols[1:2] %in% colnames(clusters))) + expect_false(output_cols[3] %in% colnames(clusters)) +}) + + +test_that("ST clusters importation is OK for all time resolutions.", { + nweeks_study <- 52 + for (timeStep in c("hourly", "daily", "weekly", "monthly", "annual")) { + expected_rows = switch(timeStep, + hourly = 24 * 7 * nweeks_study, + daily = 7 * nweeks_study, + weekly = nweeks_study, + monthly = 12, + annual = 1) + + clusters <- readAntaresSTClusters(clusters = "fr_st_other1", showProgress = FALSE, timeStep = timeStep) + expect_equal(nrow(clusters), expected_rows) + } +}) + + +test_that("ST clusters importation is KO if clusters do not belong to the study output", { + expect_warning(clusters <- readAntaresSTClusters(clusters = c("fake_one", "not_a_cluster"), timeStep = "hourly", showProgress = FALSE, opts = opts), + regexp = "do not exist in the simulation" + ) + expect_is(clusters, "data.table") + expect_true(nrow(clusters) == 0) +}) diff --git a/tests/testthat/test-readInputClusters.R b/tests/testthat/test-readInputClusters.R index 7d45eafd..7d6c7310 100644 --- a/tests/testthat/test-readInputClusters.R +++ b/tests/testthat/test-readInputClusters.R @@ -9,14 +9,19 @@ sapply(studyPathS, function(studyPath){ if(!isH5Opts(opts)){ test_that("Thermal availabilities importation works", { - input <- readInputThermal(clusters = "peak_must_run_partial", showProgress = FALSE) + # read /series files (default) + input <- readInputThermal(clusters = "peak_must_run_partial", + showProgress = FALSE) expect_is(input, "antaresDataTable") expect_gt(nrow(input), 0) expect_equal(nrow(input) %% (24 * 7 * nweeks), 0) }) test_that("Thermal modulation importation works", { - input <- readInputThermal(clusters = "peak_must_run_partial", thermalModulation = TRUE, showProgress = FALSE) + # read /series + /prepro/modulation.txt + input <- readInputThermal(clusters = "peak_must_run_partial", + thermalModulation = TRUE, + showProgress = FALSE) expect_is(input, "antaresDataList") expect_is(input$thermalModulation, "antaresDataTable") expect_gt(nrow(input$thermalModulation), 0) @@ -24,12 +29,65 @@ sapply(studyPathS, function(studyPath){ }) test_that("Thermal data importation works", { - input <- readInputThermal(clusters = "peak_must_run_partial", thermalModulation = TRUE, showProgress = FALSE) + # read /series + 
/prepro/data.txt + input <- readInputThermal(clusters = "peak_must_run_partial", + thermalData = TRUE, + showProgress = FALSE) expect_is(input, "antaresDataList") - expect_is(input$thermalModulation, "antaresDataTable") - expect_gt(nrow(input$thermalModulation), 0) - expect_equal(nrow(input$thermalModulation) %% (24 * 7 * nweeks), 0) + expect_is(input$thermalData, "antaresDataTable") + expect_gt(nrow(input$thermalData), 0) + expect_equal(nrow(input$thermalData) %% (24 * 7 * nweeks), 0) + }) + + test_that("Wrong area", { + expect_error(readInputThermal(areas = "BAD_AREA", + clusters = "peak_must_run_partial"), + regexp = "areas are not available") + }) + + test_that("Wrong cluster", { + expect_error(readInputThermal(areas = "all", + clusters = "BAD_CLUSTER"), + regexp = "clusters are not available") + }) + + test_that("No thermal data selected", { + expect_error(readInputThermal(clusters = "peak_must_run_partial", + thermalAvailabilities = FALSE), + regexp = "one type of data should be selected") }) } }) + +# >= v870 ---- +## RES ---- +test_that("test reading TS RES", { + + # read latest version study + path_study_test <- grep(pattern = "87", x = studyPathSV8, value = TRUE) + setSimulationPath(path_study_test, simulation = "input") + + res_clust_properties <- readClusterResDesc() + + test_that("read one cluster", { + # read /series files (default) + input <- readInputRES(areas = "all", + clusters = unique(res_clust_properties$cluster)[1]) + expect_is(input, "antaresDataTable") + expect_gt(nrow(input), 0) + expect_equal(nrow(input) %% (24 * 7 * nweeks), 0) + }) + + test_that("read various clusters", { + nb_cluster <- length(unique(res_clust_properties$cluster)) + # read /series files (default) + input <- readInputRES(areas = "all", + clusters = unique(res_clust_properties$cluster)) + expect_is(input, "antaresDataTable") + expect_gt(nrow(input), 0) + expect_equal(nrow(input) %% (24 * 7 * nweeks), 0) + }) + + +}) diff --git a/tests/testthat/test-setup.R b/tests/testthat/test-setSimulationPath.R similarity index 85% rename from tests/testthat/test-setup.R rename to tests/testthat/test-setSimulationPath.R index 42a6746a..eb5e21f1 100644 --- a/tests/testthat/test-setup.R +++ b/tests/testthat/test-setSimulationPath.R @@ -2,6 +2,7 @@ context("Setup functions") +# v710---- sapply(studyPathS, function(studyPath){ suppressPackageStartupMessages(require(lubridate)) @@ -176,4 +177,34 @@ test_that("Folder 'maps' is not interpreted as a study (#49)", { }) } + +test_that("No meta info areas with a ST cluster < 860", { + opts <- setSimulationPath(studyPath, "input") + expect_true(length(opts$areasWithSTClusters)==0) +}) + +test_that("No meta info binding study < 870", { + opts <- setSimulationPath(studyPath, "input") + expect_null(opts$binding) +}) + +}) + +# v860---- +test_that("New meta data for areas with a ST cluster", { + # read latest version study + path_study_test <- grep(pattern = "87", x = studyPathSV8, value = TRUE) + opts_study_test <- setSimulationPath(path_study_test, simulation = "20240105-0934eco") + + expect_false(is.null(opts_study_test$areasWithSTClusters)) +}) + + +# v870---- +test_that("New meta data for group dimension of binding constraints", { + # read latest version study + path_study_test <- grep(pattern = "87", x = studyPathSV8, value = TRUE) + opts_study_test <- setSimulationPath(path_study_test, simulation = "input") + + expect_is(opts_study_test$binding, "data.table") })
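
Usage sketch for the output-side additions in this patch (the new `clustersST` argument of `readAntares()` and the new `readAntaresSTClusters()` function). This is a minimal illustration based only on the signatures and tests above; the study path, simulation name and cluster name are placeholders and require an Antares study >= v8.6 with short-term storage output.

# Minimal sketch (R) -- placeholder study path, simulation and cluster names
library(antaresRead)

opts <- setSimulationPath("path/to/study", simulation = "20240105-0934eco")

# Short-term storage results for every area that produced ST cluster output
st_all <- readAntares(clustersST = "all", timeStep = "annual", opts = opts)

# Results for one ST cluster, restricted to two of the three output variables
st_one <- readAntaresSTClusters(clustersST = "fr_st_other1",
                                selected   = c("P.injection", "levels"),
                                timeStep   = "hourly",
                                opts       = opts)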
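
Usage sketch for the input-side changes (`readInputThermal()` and `readInputRES()` now take an `areas` argument, `clusters` is mandatory, and `thermalAvailabilities` controls whether the /series files are read). A minimal illustration, assuming an "input"-mode study; the area and cluster names below are placeholders taken from the test fixtures.

# Minimal sketch (R) -- "input" mode, placeholder area/cluster names
opts_input <- setSimulationPath("path/to/study", simulation = "input")

# Availability series (default) plus /prepro data for one cluster of one area
thermal <- readInputThermal(areas                 = "fr",
                            clusters              = "peak_must_run_partial",
                            thermalAvailabilities = TRUE,
                            thermalData           = TRUE,
                            timeStep              = "hourly",
                            opts                  = opts_input)

# Renewable series for all clusters of all areas (zero-filled when a cluster has no series)
res_ts <- readInputRES(areas = "all", clusters = "all", opts = opts_input)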
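
Finally, a short sketch of the new metadata exposed by `setSimulationPath()` in this patch; the path is again a placeholder, and `binding` is only populated for studies >= v8.7.0 read in "input" mode.

# Minimal sketch (R) -- new simulation options
opts <- setSimulationPath("path/to/study", simulation = "input")

opts$areasWithSTClusters  # areas containing short-term storage clusters (>= v8.6.0)
opts$binding              # data.table of time-series dimension per binding-constraint group
                          # (>= v8.7.0; NULL when there are no multi-column BC time series)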