From 97e52bb4b11f8fdf29f73290aaf5c05e7f03262c Mon Sep 17 00:00:00 2001
From: jreps <jreps@its.jnj.com>
Date: Fri, 6 Oct 2023 10:48:24 -0400
Subject: [PATCH 01/10] Update DESCRIPTION

---
 DESCRIPTION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 2b18de4..bbe388d 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
 Package: Characterization
 Type: Package
 Title: Characterizations of Cohorts
-Version: 0.1.2
+Version: 0.1.2.9000
 Date: 2023-09-03
 Authors@R: c(
 	person("Jenna", "Reps", , "reps@ohdsi.org", role = c("aut", "cre")),

From f5c2028c334fa4571ebf642d084b950db0199a5d Mon Sep 17 00:00:00 2001
From: jreps <jreps@its.jnj.com>
Date: Fri, 10 Nov 2023 11:51:15 -0500
Subject: [PATCH 02/10] Update Database.R

- Add code to export database results to CSV files in batches
---
 R/Database.R | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 70 insertions(+), 2 deletions(-)

diff --git a/R/Database.R b/R/Database.R
index 69a137a..71eafed 100644
--- a/R/Database.R
+++ b/R/Database.R
@@ -303,12 +303,37 @@ exportDatabaseToCsv <- function(
     )
   }
 
+  # max number of rows extracted at a time
+  maxRowCount <- 100000
+
   # get the table names using the function in uploadToDatabase.R
   tables <- getResultTables()
 
   # extract result per table
   for(table in tables){
-    sql <- "select * from @resultSchema.@appendtotable@tablename"
+
+    ParallelLogger::logInfo(paste0('Exporting rows from ', table, ' to csv file'))
+    # get row count and figure out number of loops
+    sql <- "select count(*) as N from @resultSchema.@appendtotable@tablename;"
+    sql <- SqlRender::render(
+      sql = sql,
+      resultSchema = resultSchema,
+      appendtotable = tablePrefix,
+      tablename = table
+    )
+    sql <- SqlRender::translate(
+      sql = sql,
+      targetDialect = targetDialect,
+      tempEmulationSchema = tempEmulationSchema
+    )
+    countN <- DatabaseConnector::querySql(
+      connection = connection,
+      sql = sql,
+      snakeCaseToCamelCase = F
+    )$N
+
+    # get column names
+    sql <- "select * from @resultSchema.@appendtotable@tablename where 1=0;"
     sql <- SqlRender::render(
       sql = sql,
       resultSchema = resultSchema,
@@ -320,6 +345,43 @@ exportDatabaseToCsv <- function(
       targetDialect = targetDialect,
       tempEmulationSchema = tempEmulationSchema
     )
+    cnames <- colnames(DatabaseConnector::querySql(
+      connection = connection,
+      sql = sql,
+      snakeCaseToCamelCase = F
+    ))
+
+    inds <- floor(countN/maxRowCount)
+    tableAppend = F
+    pb = utils::txtProgressBar(min = 0, max = countN, initial = 0)
+
+    for(i in 1:length(inds)){
+
+      startRow <- (i-1)*maxRowCount + 1
+      endRow <- min(i*maxRowCount, countN)
+
+    sql <- "select @cnames from
+    (select *,
+    ROW_NUMBER() OVER(ORDER BY @cnames) AS row
+    from @resultSchema.@appendtotable@tablename
+    ) temp
+    where
+    temp.row >= @start_row and
+    temp.row <= @end_row;"
+    sql <- SqlRender::render(
+      sql = sql,
+      resultSchema = resultSchema,
+      appendtotable = tablePrefix,
+      tablename = table,
+      cnames = paste(cnames, collapse = ','),
+      start_row = startRow,
+      end_row = endRow
+    )
+    sql <- SqlRender::translate(
+      sql = sql,
+      targetDialect = targetDialect,
+      tempEmulationSchema = tempEmulationSchema
+    )
     result <- DatabaseConnector::querySql(
       connection = connection,
       sql = sql,
@@ -330,8 +392,14 @@ exportDatabaseToCsv <- function(
     # save the results as a csv
     readr::write_csv(
       x = result,
-      file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv'))
+      file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv')),
+      append = tableAppend
     )
+    tableAppend = T
+    utils::setTxtProgressBar(pb,endRow)
+
+    }
+    close(pb)
   }
 
   invisible(saveDirectory)

From 4e64c9aaa478af48b9572eef0a33829047ed8931 Mon Sep 17 00:00:00 2001
From: Admin_mschuemi <Admin_mschuemi@its.jnj.com>
Date: Wed, 15 Nov 2023 04:44:53 -0500
Subject: [PATCH 03/10] Using dbFetch to export to CSV in batches. Deprecating
 targetDialect argument.

---
 DESCRIPTION                |  3 ++-
 R/Database.R               | 49 ++++++++++++++++++++++----------------
 man/exportDatabaseToCsv.Rd |  4 ++--
 3 files changed, 32 insertions(+), 24 deletions(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index bbe388d..a6824cb 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -16,7 +16,7 @@ Depends:
 	R (>= 4.0.0)
 Imports:
   Andromeda,
-	DatabaseConnector (>= 5.0.4),
+	DatabaseConnector (>= 6.3.1),
 	FeatureExtraction  (>= 3.0.0),
 	SqlRender (>= 1.9.0),
 	ParallelLogger (>= 3.0.0),
@@ -40,6 +40,7 @@ Remotes:
 	ohdsi/Eunomia,
 	ohdsi/ResultModelManager,
 	ohdsi/ShinyAppBuilder
+	ohdsi/DatabaseConnector@develop
 NeedsCompilation: no
 RoxygenNote: 7.2.3
 Encoding: UTF-8
diff --git a/R/Database.R b/R/Database.R
index 69a137a..ce543ea 100644
--- a/R/Database.R
+++ b/R/Database.R
@@ -255,7 +255,7 @@ createCharacterizationTables <- function(
 #'                                     function \code{connect} in the
 #'                                     \code{DatabaseConnector} package.
 #' @param resultSchema                 The name of the database schema that the result tables will be created.
-#' @param targetDialect                The database management system being used
+#' @param targetDialect                DEPRECATED: derived from \code{connectionDetails}.
 #' @param tablePrefix                  The table prefix to apply to the characterization result tables
 #' @param filePrefix                   The prefix to apply to the files
 #' @param tempEmulationSchema          The temp schema used when the database management system is oracle
@@ -268,7 +268,7 @@ createCharacterizationTables <- function(
 exportDatabaseToCsv <- function(
     connectionDetails,
     resultSchema,
-    targetDialect,
+    targetDialect = NULL,
     tablePrefix = "c_",
     filePrefix = NULL,
     tempEmulationSchema = NULL,
@@ -282,6 +282,9 @@ exportDatabaseToCsv <- function(
     errorMessages = errorMessages
   )
   checkmate::reportAssertions(errorMessages)
+  if (!is.null(targetDialect)) {
+    warning("The targetDialect argument is deprecated")
+  }
 
   if (is.null(filePrefix)) {
     filePrefix = ''
@@ -303,37 +306,41 @@ exportDatabaseToCsv <- function(
     )
   }
 
+  # max number of rows extracted at a time
+  maxRowCount <- 100000
+
   # get the table names using the function in uploadToDatabase.R
   tables <- getResultTables()
 
   # extract result per table
   for(table in tables){
-    sql <- "select * from @resultSchema.@appendtotable@tablename"
+    sql <- "select * from @resultSchema.@appendtotable@tablename;"
     sql <- SqlRender::render(
       sql = sql,
       resultSchema = resultSchema,
       appendtotable = tablePrefix,
       tablename = table
     )
-    sql <- SqlRender::translate(
-      sql = sql,
-      targetDialect = targetDialect,
-      tempEmulationSchema = tempEmulationSchema
-    )
-    result <- DatabaseConnector::querySql(
-      connection = connection,
-      sql = sql,
-      snakeCaseToCamelCase = F
-    )
-    result <- formatDouble(result)
-
-    # save the results as a csv
-    readr::write_csv(
-      x = result,
-      file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv'))
-    )
+    resultSet <- DatabaseConnector::dbSendQuery(connection, sql)
+    tryCatch({
+      first <- TRUE
+      while (!DatabaseConnector::dbHasCompleted(resultSet)) {
+        result <- DatabaseConnector::dbFetch(resultSet, n = maxRowCount)
+        result <- formatDouble(result)
+
+        # save the results as a csv
+        readr::write_csv(
+          x = result,
+          file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv')),
+          append = !first
+        )
+        first <- FALSE
+      }
+    },
+    finally = {
+      DatabaseConnector::dbClearResult(resultSet)
+    })
   }
-
   invisible(saveDirectory)
 }
 
diff --git a/man/exportDatabaseToCsv.Rd b/man/exportDatabaseToCsv.Rd
index 08e5fdc..f851596 100644
--- a/man/exportDatabaseToCsv.Rd
+++ b/man/exportDatabaseToCsv.Rd
@@ -7,7 +7,7 @@
 exportDatabaseToCsv(
   connectionDetails,
   resultSchema,
-  targetDialect,
+  targetDialect = NULL,
   tablePrefix = "c_",
   filePrefix = NULL,
   tempEmulationSchema = NULL,
@@ -21,7 +21,7 @@ function \code{connect} in the
 
 \item{resultSchema}{The name of the database schema that the result tables will be created.}
 
-\item{targetDialect}{The database management system being used}
+\item{targetDialect}{DEPRECATED: derived from \code{connectionDetails}.}
 
 \item{tablePrefix}{The table prefix to apply to the characterization result tables}
 

From 543f77ebb56e5db71b5349826c092a0ebbc62694 Mon Sep 17 00:00:00 2001
From: Admin_mschuemi <Admin_mschuemi@its.jnj.com>
Date: Wed, 15 Nov 2023 04:53:53 -0500
Subject: [PATCH 04/10] Fix typo in DESCRIPTION

---
 DESCRIPTION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index a6824cb..cb9df32 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -39,7 +39,7 @@ Remotes:
 	ohdsi/FeatureExtraction,
 	ohdsi/Eunomia,
 	ohdsi/ResultModelManager,
-	ohdsi/ShinyAppBuilder
+	ohdsi/ShinyAppBuilder,
 	ohdsi/DatabaseConnector@develop
 NeedsCompilation: no
 RoxygenNote: 7.2.3

From 44c727cab8975af34d48340eba257cfcc0dbd99d Mon Sep 17 00:00:00 2001
From: Admin_mschuemi <Admin_mschuemi@its.jnj.com>
Date: Wed, 15 Nov 2023 05:09:43 -0500
Subject: [PATCH 05/10] Handling case when table in SQLite is empty

---
 R/Database.R | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/R/Database.R b/R/Database.R
index ce543ea..e4d9853 100644
--- a/R/Database.R
+++ b/R/Database.R
@@ -324,7 +324,7 @@ exportDatabaseToCsv <- function(
     resultSet <- DatabaseConnector::dbSendQuery(connection, sql)
     tryCatch({
       first <- TRUE
-      while (!DatabaseConnector::dbHasCompleted(resultSet)) {
+      while (first || !DatabaseConnector::dbHasCompleted(resultSet)) {
         result <- DatabaseConnector::dbFetch(resultSet, n = maxRowCount)
         result <- formatDouble(result)
 

From fb4c408d81963160564d02e55d5d26bda0350cc9 Mon Sep 17 00:00:00 2001
From: Admin_mschuemi <Admin_mschuemi@its.jnj.com>
Date: Thu, 16 Nov 2023 00:11:43 -0500
Subject: [PATCH 06/10] Fix error in dechallenge_rechallenge minCellCount

---
 R/RunCharacterization.R | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/R/RunCharacterization.R b/R/RunCharacterization.R
index 3f66f83..c32f42c 100644
--- a/R/RunCharacterization.R
+++ b/R/RunCharacterization.R
@@ -273,7 +273,7 @@ runCharacterizationAnalyses <- function(
           tablePrefix = tablePrefix,
           minCellCount = minCellCount,
           minCellCountColumns = list(
-              c('numEvents'),
+              c('numCases'),
               c('dechallengeAttempt'),
               c('dechallengeFail', 'dechallengeSuccess'),
               c('rechallengeAttempt'),

From e1addc2dd1937a732035e6168d58e07b8e2e5b62 Mon Sep 17 00:00:00 2001
From: Admin_mschuemi <Admin_mschuemi@its.jnj.com>
Date: Thu, 16 Nov 2023 00:12:28 -0500
Subject: [PATCH 07/10] Adding minMeanCovariateValue argument to
 exportDatabaseToCsv

---
 R/Database.R               | 12 ++++++++++--
 man/exportDatabaseToCsv.Rd |  7 ++++++-
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/R/Database.R b/R/Database.R
index e4d9853..2b23239 100644
--- a/R/Database.R
+++ b/R/Database.R
@@ -260,6 +260,9 @@ createCharacterizationTables <- function(
 #' @param filePrefix                   The prefix to apply to the files
 #' @param tempEmulationSchema          The temp schema used when the database management system is oracle
 #' @param saveDirectory                The directory to save the csv results
+#' @param minMeanCovariateValue        The minimum mean covariate value (i.e. the minimum proportion for
+#'                                     binary covariates) for a covariate to be included in covariate table.
+#'                                     Other covariates are removed to save space.
 #'
 #' @return
 #' csv file per table into the saveDirectory
@@ -272,7 +275,8 @@ exportDatabaseToCsv <- function(
     tablePrefix = "c_",
     filePrefix = NULL,
     tempEmulationSchema = NULL,
-    saveDirectory
+    saveDirectory,
+    minMeanCovariateValue = 0.001
 ){
 
   errorMessages <- checkmate::makeAssertCollection()
@@ -307,7 +311,7 @@ exportDatabaseToCsv <- function(
   }
 
   # max number of rows extracted at a time
-  maxRowCount <- 100000
+  maxRowCount <- 1e6
 
   # get the table names using the function in uploadToDatabase.R
   tables <- getResultTables()
@@ -326,6 +330,10 @@ exportDatabaseToCsv <- function(
       first <- TRUE
       while (first || !DatabaseConnector::dbHasCompleted(resultSet)) {
         result <- DatabaseConnector::dbFetch(resultSet, n = maxRowCount)
+        if (table == "covariates" && minMeanCovariateValue > 0) {
+          result <- result %>%
+            dplyr::filter(.data$average_value >= minMeanCovariateValue)
+        }
         result <- formatDouble(result)
 
         # save the results as a csv
diff --git a/man/exportDatabaseToCsv.Rd b/man/exportDatabaseToCsv.Rd
index f851596..e39f860 100644
--- a/man/exportDatabaseToCsv.Rd
+++ b/man/exportDatabaseToCsv.Rd
@@ -11,7 +11,8 @@ exportDatabaseToCsv(
   tablePrefix = "c_",
   filePrefix = NULL,
   tempEmulationSchema = NULL,
-  saveDirectory
+  saveDirectory,
+  minMeanCovariateValue = 0.001
 )
 }
 \arguments{
@@ -30,6 +31,10 @@ function \code{connect} in the
 \item{tempEmulationSchema}{The temp schema used when the database management system is oracle}
 
 \item{saveDirectory}{The directory to save the csv results}
+
+\item{minMeanCovariateValue}{The minimum mean covariate value (i.e. the minimum proportion for
+binary covariates) for a covariate to be included in covariate table.
+Other covariates are removed to save space.}
 }
 \value{
 csv file per table into the saveDirectory

From 570bf4ff4486e953c25be2ef07fe265bd75b8ab1 Mon Sep 17 00:00:00 2001
From: jreps <jreps@its.jnj.com>
Date: Mon, 20 Nov 2023 10:26:04 -0500
Subject: [PATCH 08/10] optimizing aggregate features

- Remove the "T and not O" cohorts from the aggregate analysis, since these can be derived from the "T" and "T with O" cohorts
- Update tests after removing the "T and not O" cohorts
- Update the CSV export function to handle batched export when a table is empty
---
 R/Database.R                                  | 101 +++++++-----
 .../sql/sql_server/DropAggregateCovariate.sql |   6 -
 .../createTargetOutcomeCombinations.sql       | 150 +++++++++---------
 tests/testthat/test-aggregateCovariate.R      |   4 +-
 4 files changed, 142 insertions(+), 119 deletions(-)

diff --git a/R/Database.R b/R/Database.R
index 71eafed..5bfb3c7 100644
--- a/R/Database.R
+++ b/R/Database.R
@@ -351,16 +351,17 @@ exportDatabaseToCsv <- function(
       snakeCaseToCamelCase = F
     ))
 
-    inds <- floor(countN/maxRowCount)
-    tableAppend = F
-    pb = utils::txtProgressBar(min = 0, max = countN, initial = 0)
+    if(countN > 0){
+      inds <- floor(countN/maxRowCount)
+      tableAppend = F
+      pb = utils::txtProgressBar(min = 0, max = countN, initial = 0)
 
-    for(i in 1:length(inds)){
+      for(i in 1:length(inds)){
 
-      startRow <- (i-1)*maxRowCount + 1
-      endRow <- min(i*maxRowCount, countN)
+        startRow <- (i-1)*maxRowCount + 1
+        endRow <- min(i*maxRowCount, countN)
 
-    sql <- "select @cnames from
+        sql <- "select @cnames from
     (select *,
     ROW_NUMBER() OVER(ORDER BY @cnames) AS row
     from @resultSchema.@appendtotable@tablename
@@ -368,38 +369,66 @@ exportDatabaseToCsv <- function(
     where
     temp.row >= @start_row and
     temp.row <= @end_row;"
-    sql <- SqlRender::render(
-      sql = sql,
-      resultSchema = resultSchema,
-      appendtotable = tablePrefix,
-      tablename = table,
-      cnames = paste(cnames, collapse = ','),
-      start_row = startRow,
-      end_row = endRow
-    )
-    sql <- SqlRender::translate(
-      sql = sql,
-      targetDialect = targetDialect,
-      tempEmulationSchema = tempEmulationSchema
-    )
-    result <- DatabaseConnector::querySql(
-      connection = connection,
-      sql = sql,
-      snakeCaseToCamelCase = F
-    )
-    result <- formatDouble(result)
+        sql <- SqlRender::render(
+          sql = sql,
+          resultSchema = resultSchema,
+          appendtotable = tablePrefix,
+          tablename = table,
+          cnames = paste(cnames, collapse = ','),
+          start_row = startRow,
+          end_row = endRow
+        )
+        sql <- SqlRender::translate(
+          sql = sql,
+          targetDialect = targetDialect,
+          tempEmulationSchema = tempEmulationSchema
+        )
+        result <- DatabaseConnector::querySql(
+          connection = connection,
+          sql = sql,
+          snakeCaseToCamelCase = F
+        )
+        result <- formatDouble(result)
 
-    # save the results as a csv
-    readr::write_csv(
-      x = result,
-      file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv')),
-      append = tableAppend
-    )
-    tableAppend = T
-    utils::setTxtProgressBar(pb,endRow)
+        # save the results as a csv
+        readr::write_csv(
+          x = result,
+          file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv')),
+          append = tableAppend
+        )
+        tableAppend = T
+        utils::setTxtProgressBar(pb,endRow)
+
+      }
+      close(pb)
+    } else{
+      sql <- "select * from
+      @resultSchema.@appendtotable@tablename;"
+      sql <- SqlRender::render(
+        sql = sql,
+        resultSchema = resultSchema,
+        appendtotable = tablePrefix,
+        tablename = table
+      )
+      sql <- SqlRender::translate(
+        sql = sql,
+        targetDialect = targetDialect,
+        tempEmulationSchema = tempEmulationSchema
+      )
+      result <- DatabaseConnector::querySql(
+        connection = connection,
+        sql = sql,
+        snakeCaseToCamelCase = F
+      )
+      result <- formatDouble(result)
 
+      # save the results as a csv
+      readr::write_csv(
+        x = result,
+        file = file.path(saveDirectory, paste0(tolower(filePrefix), table,'.csv')),
+        append = tableAppend
+      )
     }
-    close(pb)
   }
 
   invisible(saveDirectory)
diff --git a/inst/sql/sql_server/DropAggregateCovariate.sql b/inst/sql/sql_server/DropAggregateCovariate.sql
index b319cff..3e603cb 100644
--- a/inst/sql/sql_server/DropAggregateCovariate.sql
+++ b/inst/sql/sql_server/DropAggregateCovariate.sql
@@ -21,12 +21,6 @@ DROP TABLE #target_with_outcome;
 TRUNCATE TABLE #target_outcome_f;
 DROP TABLE #target_outcome_f;
 
-TRUNCATE TABLE #target_nooutcome;
-DROP TABLE #target_nooutcome;
-
-TRUNCATE TABLE #target_noout_f;
-DROP TABLE #target_noout_f;
-
 TRUNCATE TABLE #agg_cohorts;
 DROP TABLE #agg_cohorts;
 
diff --git a/inst/sql/sql_server/createTargetOutcomeCombinations.sql b/inst/sql/sql_server/createTargetOutcomeCombinations.sql
index 5b56ad4..8329fd4 100644
--- a/inst/sql/sql_server/createTargetOutcomeCombinations.sql
+++ b/inst/sql/sql_server/createTargetOutcomeCombinations.sql
@@ -10,7 +10,7 @@ drop table if exists #targets_agg;
 select * into #targets_agg
 from
 (select *,
-row_number() over(partition by subject_id, cohort_definition_id, cohort_start_date order by cohort_start_date asc) as rn
+row_number() over(partition by subject_id, cohort_definition_id order by cohort_start_date asc) as rn
 from @target_database_schema.@target_table
 where cohort_definition_id in
 (@target_ids)
@@ -69,16 +69,16 @@ CROSS JOIN
 
 union
 
-select distinct
-t.cohort_definition_id as target_cohort_id,
-o.cohort_definition_id as outcome_cohort_id,
-'TnOc' as cohort_type
-from
-(select distinct cohort_definition_id from #targets_agg) as t
-CROSS JOIN
-(select distinct cohort_definition_id from #outcomes_agg) as o
+--select distinct
+--t.cohort_definition_id as target_cohort_id,
+--o.cohort_definition_id as outcome_cohort_id,
+--'TnOc' as cohort_type
+--from
+--(select distinct cohort_definition_id from #targets_agg) as t
+--CROSS JOIN
+--(select distinct cohort_definition_id from #outcomes_agg) as o
 
-union
+--union
 
 select distinct
 t.cohort_definition_id as target_cohort_id,
@@ -102,16 +102,16 @@ CROSS JOIN
 
 union
 
-select distinct
-t.cohort_definition_id as target_cohort_id,
-o.cohort_definition_id as outcome_cohort_id,
-'TnfirstOc' as cohort_type
-from
-(select distinct cohort_definition_id from #targets_agg) as t
-CROSS JOIN
-(select distinct cohort_definition_id from #outcomes_agg) as o
+--select distinct
+--t.cohort_definition_id as target_cohort_id,
+--o.cohort_definition_id as outcome_cohort_id,
+--'TnfirstOc' as cohort_type
+--from
+--(select distinct cohort_definition_id from #targets_agg) as t
+--CROSS JOIN
+--(select distinct cohort_definition_id from #outcomes_agg) as o
 
-union
+--union
 
 select distinct
 t.cohort_definition_id as target_cohort_id,
@@ -191,39 +191,39 @@ o.cohort_start_date >= dateadd(day, @tar_start, t.@tar_start_anchor);
 
 
 -- 2) get all the people without the outcome in TAR
-drop table if exists #target_nooutcome;
-select
-t.subject_id,
-t.cohort_start_date,
-t.cohort_end_date,
-t.cohort_definition_id as target_cohort_id,
-o.cohort_definition_id as outcome_cohort_id
-into #target_nooutcome
-from #targets_agg t
-CROSS JOIN
-( select distinct cohort_definition_id from #outcomes_agg) o
-left outer join #target_with_outcome two
-on t.cohort_definition_id = two.target_cohort_id
-and t.subject_id = two.subject_id
-and o.cohort_definition_id = two.outcome_cohort_id
-where two.subject_id IS NULL;
-
-drop table if exists #target_noout_f;
-select
-t.subject_id,
-t.cohort_start_date,
-t.cohort_end_date,
-t.cohort_definition_id as target_cohort_id,
-o.cohort_definition_id as outcome_cohort_id
-into #target_noout_f
-from #targets_agg t
-CROSS JOIN
-( select distinct cohort_definition_id from #outcomes_agg) o
-left outer join #target_outcome_f two
-on t.cohort_definition_id = two.target_cohort_id
-and t.subject_id = two.subject_id
-and o.cohort_definition_id = two.outcome_cohort_id
-where two.subject_id IS NULL;
+--drop table if exists #target_nooutcome;
+--select
+--t.subject_id,
+--t.cohort_start_date,
+--t.cohort_end_date,
+--t.cohort_definition_id as target_cohort_id,
+--o.cohort_definition_id as outcome_cohort_id
+--into #target_nooutcome
+--from #targets_agg t
+--CROSS JOIN
+--( select distinct cohort_definition_id from #outcomes_agg) o
+--left outer join #target_with_outcome two
+--on t.cohort_definition_id = two.target_cohort_id
+--and t.subject_id = two.subject_id
+--and o.cohort_definition_id = two.outcome_cohort_id
+--where two.subject_id IS NULL;
+
+--drop table if exists #target_noout_f;
+--select
+--t.subject_id,
+--t.cohort_start_date,
+--t.cohort_end_date,
+--t.cohort_definition_id as target_cohort_id,
+--o.cohort_definition_id as outcome_cohort_id
+--into #target_noout_f
+--from #targets_agg t
+--CROSS JOIN
+--( select distinct cohort_definition_id from #outcomes_agg) o
+--left outer join #target_outcome_f two
+--on t.cohort_definition_id = two.target_cohort_id
+--and t.subject_id = two.subject_id
+--and o.cohort_definition_id = two.outcome_cohort_id
+--where two.subject_id IS NULL;
 
 -- Final: select into #agg_cohorts
 
@@ -294,33 +294,33 @@ union
 
 -- T without O
 
-select
-tnoc.subject_id,
-tnoc.cohort_start_date,
-tnoc.cohort_end_date,
-cd.cohort_definition_id
-from #target_nooutcome tnoc
-INNER JOIN #cohort_details cd
-on cd.target_cohort_id = tnoc.target_cohort_id
-and cd.outcome_cohort_id = tnoc.outcome_cohort_id
-and cd.cohort_type = 'TnOc'
+--select
+--tnoc.subject_id,
+--tnoc.cohort_start_date,
+--tnoc.cohort_end_date,
+--cd.cohort_definition_id
+--from #target_nooutcome tnoc
+--INNER JOIN #cohort_details cd
+--on cd.target_cohort_id = tnoc.target_cohort_id
+--and cd.outcome_cohort_id = tnoc.outcome_cohort_id
+--and cd.cohort_type = 'TnOc'
 
-union
+--union
 
 -- T without first O
 
-select
-tnoc.subject_id,
-tnoc.cohort_start_date,
-tnoc.cohort_end_date,
-cd.cohort_definition_id
-from #target_noout_f tnoc
-INNER JOIN #cohort_details cd
-on cd.target_cohort_id = tnoc.target_cohort_id
-and cd.outcome_cohort_id = tnoc.outcome_cohort_id
-and cd.cohort_type = 'TnfirstOc'
-
-union
+--select
+--tnoc.subject_id,
+--tnoc.cohort_start_date,
+--tnoc.cohort_end_date,
+--cd.cohort_definition_id
+--from #target_noout_f tnoc
+--INNER JOIN #cohort_details cd
+--on cd.target_cohort_id = tnoc.target_cohort_id
+--and cd.outcome_cohort_id = tnoc.outcome_cohort_id
+--and cd.cohort_type = 'TnfirstOc'
+
+--union
 
 -- Ts and Os
 
diff --git a/tests/testthat/test-aggregateCovariate.R b/tests/testthat/test-aggregateCovariate.R
index af70431..9f1e9d0 100644
--- a/tests/testthat/test-aggregateCovariate.R
+++ b/tests/testthat/test-aggregateCovariate.R
@@ -97,7 +97,7 @@ test_that("computeAggregateCovariateAnalyses", {
 
   testthat::expect_true(inherits(agc, "CovariateData"))
   testthat::expect_true(length(unique(as.data.frame(agc$covariates)$cohortDefinitionId))
-  <= length(res$targetIds) * length(res$outcomeIds) * 6 + length(res$targetIds)*2 + length(res$outcomeIds)*2)
+  <= length(res$targetIds) * length(res$outcomeIds) * 4 + length(res$targetIds)*2 + length(res$outcomeIds)*2)
   testthat::expect_true(
     sum(names(agc) %in% c(
       "analysisRef",
@@ -121,7 +121,7 @@ test_that("computeAggregateCovariateAnalyses", {
   )
 
   testthat::expect_true(
-    nrow(as.data.frame(agc$cohortDetails)) == 26 # 8 T/Os, 6 TnO, 6 TnOc, 6 OnT
+    nrow(as.data.frame(agc$cohortDetails)) == 20 # 8 T/Os, 6 TnO, 0 TnOc, 6 OnT
   )
 
   # test saving/loading

From 31d94736e9e20570dcdecdf6b8d529565bac2454 Mon Sep 17 00:00:00 2001
From: jreps <jreps@its.jnj.com>
Date: Tue, 28 Nov 2023 13:40:26 -0500
Subject: [PATCH 09/10] Update DESCRIPTION

---
 DESCRIPTION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index cb9df32..6ac5ea4 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -40,7 +40,7 @@ Remotes:
 	ohdsi/Eunomia,
 	ohdsi/ResultModelManager,
 	ohdsi/ShinyAppBuilder,
-	ohdsi/DatabaseConnector@develop
+	ohdsi/DatabaseConnector
 NeedsCompilation: no
 RoxygenNote: 7.2.3
 Encoding: UTF-8

From ea3c228503b346975626f3e0589bc4fd617be985 Mon Sep 17 00:00:00 2001
From: jreps <jreps@its.jnj.com>
Date: Tue, 28 Nov 2023 13:40:43 -0500
Subject: [PATCH 10/10] Update DESCRIPTION

---
 DESCRIPTION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DESCRIPTION b/DESCRIPTION
index 6ac5ea4..2c79450 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
 Package: Characterization
 Type: Package
 Title: Characterizations of Cohorts
-Version: 0.1.2.9000
+Version: 0.1.3
 Date: 2023-09-03
 Authors@R: c(
 	person("Jenna", "Reps", , "reps@ohdsi.org", role = c("aut", "cre")),