Skip to content

Commit

Permalink
Merge pull request #4 from mps9506/load-report
Browse files Browse the repository at this point in the history
Load report
  • Loading branch information
mps9506 authored Nov 16, 2022
2 parents 3020101 + cd0915e commit 3a4f30e
Show file tree
Hide file tree
Showing 59 changed files with 155,738 additions and 83 deletions.
26 changes: 26 additions & 0 deletions R/metadata.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
## Build dataspice metadata for the data/Output directory.
## NOTE: run interactively, outside of the targets workflow. The edit_*()
## calls are presumably interactive editors -- confirm before scripting this.

library(dataspice)

output_dir <- "data/Output"
metadata_dir <- file.path(output_dir, "metadata")

# Scaffold the metadata templates under data/Output/metadata/.
create_spice(dir = paste0(output_dir, "/"))

# Pre-populate the access template from the files found under data/Output.
prep_access(
  data_path = output_dir,
  access_path = file.path(metadata_dir, "access.csv"),
  recursive = TRUE
)

# Review/edit the access table.
edit_access(metadata_dir = metadata_dir)

# Pre-populate the attributes template from the files found under data/Output.
prep_attributes(
  data_path = output_dir,
  attributes_path = file.path(metadata_dir, "attributes.csv"),
  recursive = TRUE
)

# Manual edits: variable attributes, creators, and bibliographic info.
edit_attributes(metadata_dir = metadata_dir)
edit_creators(metadata_dir = metadata_dir)
edit_biblio(metadata_dir = metadata_dir)

# Combine the edited templates into dataspice.json.
write_spice(path = metadata_dir)

# Build the metadata landing page from the JSON file.
build_site(file.path(metadata_dir, "dataspice.json"))
9 changes: 8 additions & 1 deletion README.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,14 @@ output: github_document
knitr::opts_chunk$set(echo = TRUE)
```

This is a placeholder.
This is the analysis code and data for the Texas Coastal Nutrient Input Repository project. This project develops estimates of actual and flow-normalized daily nitrate (NO~3~-N) and total phosphorus (TP) watershed loads. Reports and publications are forthcoming.

Data analysis and models were developed in R using the [renv](https://rstudio.github.io/renv/) and [targets](https://docs.ropensci.org/targets/) R packages to facilitate reproducibility. To reproduce this analysis, clone the repository to your local machine, ensure both renv and targets are installed, and open the project. The analysis can be reproduced using:

```r
renv::restore()
targets::tar_make()
```

## Funding

Expand Down
17 changes: 16 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,22 @@ Texas Coastal Nutrient Input Repository (Phase 1 - Lavaca Bay)
================
Michael Schramm - Texas Water Resources Institute, Research Specialist

This is a placeholder.
This is the analysis code and data for the Texas Coastal Nutrient Input
Repository project. This project develops estimates of actual and
flow-normalized daily nitrate (NO<sub>3</sub>-N) and total phosphorus
(TP) watershed loads. Reports and publications are forthcoming.

Data analysis and models were developed in R using the
[renv](https://rstudio.github.io/renv/) and
[targets](https://docs.ropensci.org/targets/) R packages to facilitate
reproducibility. To reproduce this analysis, clone the repository to your
local machine, ensure both renv and targets are installed, and open the
project. The analysis can be reproduced using:

``` r
renv::restore()
targets::tar_make()
```

## Funding

Expand Down
307 changes: 303 additions & 4 deletions _targets.R
Original file line number Diff line number Diff line change
Expand Up @@ -647,14 +647,313 @@ list(
))
,

### Navidad (USGS 08164390) daily load targets --------------------------------

# Daily NO3 load estimates (actual flows).
tar_target(
  daily_no3_08164390,
  predict_daily(
    model = no3_08164390,        # upstream model target
    data = model_data,           # upstream data target
    site_no = "usgs08164390",    # quoted string
    date = "2005-01-01",         # quoted string
    output_name = NO3_Estimate,  # bare (unquoted) output column names
    output_upper = NO3_Upper,
    output_lower = NO3_Lower
  )
),
# Daily NO3 load estimates, flow-normalized.
tar_target(
  daily_no3_08164390_fn,
  predict_daily(
    model = no3_08164390,
    data = flow_normalized_data, # upstream flow-normalized data target
    site_no = "usgs08164390",
    date = "2005-01-01",
    output_name = NO3_Estimate,
    output_upper = NO3_Upper,
    output_lower = NO3_Lower,
    fn_data = TRUE
  )
),
# Daily TP load estimates (actual flows).
tar_target(
  daily_tp_08164390,
  predict_daily(
    model = tp_08164390,
    data = model_data,
    site_no = "usgs08164390",
    date = "2000-01-01",
    output_name = TP_Estimate,
    output_upper = TP_Upper,
    output_lower = TP_Lower
  )
),
# Daily TP load estimates, flow-normalized.
tar_target(
  daily_tp_08164390_fn,
  predict_daily(
    model = tp_08164390,
    data = flow_normalized_data,
    site_no = "usgs08164390",
    date = "2000-01-01",
    output_name = TP_Estimate,
    output_upper = TP_Upper,
    output_lower = TP_Lower,
    fn_data = TRUE
  )
),

# create loading output file (csv)
tar_target(write_daily,
loads_to_csv(list("lav" = tar_read(daily_tp_08164000),
"tex" = tar_read(daily_tp_texana)),
### Sandy (USGS 08164450) daily load targets ----------------------------------

# Daily NO3 load estimates (actual flows).
tar_target(
  daily_no3_08164450,
  predict_daily(
    model = no3_08164450,        # upstream model target
    data = model_data,           # upstream data target
    site_no = "usgs08164450",    # quoted string
    date = "2005-01-01",         # quoted string
    output_name = NO3_Estimate,  # bare (unquoted) output column names
    output_upper = NO3_Upper,
    output_lower = NO3_Lower
  )
),
# Daily NO3 load estimates, flow-normalized.
tar_target(
  daily_no3_08164450_fn,
  predict_daily(
    model = no3_08164450,
    data = flow_normalized_data, # upstream flow-normalized data target
    site_no = "usgs08164450",
    date = "2005-01-01",
    output_name = NO3_Estimate,
    output_upper = NO3_Upper,
    output_lower = NO3_Lower,
    fn_data = TRUE
  )
),
# Daily TP load estimates (actual flows).
tar_target(
  daily_tp_08164450,
  predict_daily(
    model = tp_08164450,
    data = model_data,
    site_no = "usgs08164450",
    date = "2000-01-01",
    output_name = TP_Estimate,
    output_upper = TP_Upper,
    output_lower = TP_Lower
  )
),
# Daily TP load estimates, flow-normalized.
tar_target(
  daily_tp_08164450_fn,
  predict_daily(
    model = tp_08164450,
    data = flow_normalized_data,
    site_no = "usgs08164450",
    date = "2000-01-01",
    output_name = TP_Estimate,
    output_upper = TP_Upper,
    output_lower = TP_Lower,
    fn_data = TRUE
  )
),
### W Mustang (USGS 08164503) daily NO3 targets -------------------------------

# Daily NO3 load estimates (actual flows).
tar_target(
  daily_no3_08164503,
  predict_daily(
    model = no3_08164503,        # upstream model target
    data = model_data,           # upstream data target
    site_no = "usgs08164503",    # quoted string
    date = "2005-01-01",         # quoted string
    output_name = NO3_Estimate,  # bare (unquoted) output column names
    output_upper = NO3_Upper,
    output_lower = NO3_Lower
  )
),
# Daily NO3 load estimates, flow-normalized.
tar_target(
  daily_no3_08164503_fn,
  predict_daily(
    model = no3_08164503,
    data = flow_normalized_data, # upstream flow-normalized data target
    site_no = "usgs08164503",
    date = "2005-01-01",
    output_name = NO3_Estimate,
    output_upper = NO3_Upper,
    output_lower = NO3_Lower,
    fn_data = TRUE
  )
),
### TP W Mustang
# Daily TP load estimates (actual flows).
# FIX: the original passed the NO3 model (no3_08164503) to this TP target --
# a copy/paste bug; every other daily_tp_* target uses its tp_<site> model.
tar_target(daily_tp_08164503,
           predict_daily(
             model = tp_08164503, # target (was no3_08164503)
             data = model_data, # target
             site_no = "usgs08164503", # quoted string
             date = "2000-01-01", # quoted string
             output_name = TP_Estimate, #unquoted
             output_upper = TP_Upper, #unquoted
             output_lower = TP_Lower #unquoted
           )),
### W Mustang (USGS 08164503): daily TP load estimates, flow-normalized.
tar_target(
  daily_tp_08164503_fn,
  predict_daily(
    model = tp_08164503,         # upstream model target
    data = flow_normalized_data, # upstream flow-normalized data target
    site_no = "usgs08164503",    # quoted string
    date = "2000-01-01",         # quoted string
    output_name = TP_Estimate,   # bare (unquoted) output column names
    output_upper = TP_Upper,
    output_lower = TP_Lower,
    fn_data = TRUE
  )
),
### E Mustang (USGS 08164504) daily load targets ------------------------------

# Daily NO3 load estimates (actual flows).
tar_target(
  daily_no3_08164504,
  predict_daily(
    model = no3_08164504,        # upstream model target
    data = model_data,           # upstream data target
    site_no = "usgs08164504",    # quoted string
    date = "2005-01-01",         # quoted string
    output_name = NO3_Estimate,  # bare (unquoted) output column names
    output_upper = NO3_Upper,
    output_lower = NO3_Lower
  )
),
# Daily NO3 load estimates, flow-normalized.
tar_target(
  daily_no3_08164504_fn,
  predict_daily(
    model = no3_08164504,
    data = flow_normalized_data, # upstream flow-normalized data target
    site_no = "usgs08164504",
    date = "2005-01-01",
    output_name = NO3_Estimate,
    output_upper = NO3_Upper,
    output_lower = NO3_Lower,
    fn_data = TRUE
  )
),
# Daily TP load estimates (actual flows).
tar_target(
  daily_tp_08164504,
  predict_daily(
    model = tp_08164504,
    data = model_data,
    site_no = "usgs08164504",
    date = "2000-01-01",
    output_name = TP_Estimate,
    output_upper = TP_Upper,
    output_lower = TP_Lower
  )
),
# Daily TP load estimates, flow-normalized.
tar_target(
  daily_tp_08164504_fn,
  predict_daily(
    model = tp_08164504,
    data = flow_normalized_data,
    site_no = "usgs08164504",
    date = "2000-01-01",
    output_name = TP_Estimate,
    output_upper = TP_Upper,
    output_lower = TP_Lower,
    fn_data = TRUE
  )
),
# Create TP loading output files (csv).
# FIX: the originals wrapped each input in tar_read(). tar_read() is for
# interactive use; inside a pipeline it hides the upstream dependency from
# targets, so these file targets would not be scheduled after (or invalidated
# by) the load-estimate targets. Referencing the target symbols directly
# restores dependency tracking and passes the same objects to loads_to_csv().
tar_target(write_daily_tp,
           loads_to_csv(list("lavaca" = daily_tp_08164000,
                             "texana" = daily_tp_texana,
                             "navidad" = daily_tp_08164390,
                             "sandy" = daily_tp_08164450,
                             "w_mustang" = daily_tp_08164503,
                             "e_mustang" = daily_tp_08164504),
                        df = "daily",
                        output = "data/Output/daily_loads/tp_daily_loads.csv"),
           format = "file"),
tar_target(write_daily_tp_fn,
           loads_to_csv(list("lavaca" = daily_tp_08164000_fn,
                             "texana" = daily_tp_texana_fn,
                             "navidad" = daily_tp_08164390_fn,
                             "sandy" = daily_tp_08164450_fn,
                             "w_mustang" = daily_tp_08164503_fn,
                             "e_mustang" = daily_tp_08164504_fn),
                        df = "daily",
                        output = "data/Output/daily_loads/tp_daily_loads_flow_normalized.csv"),
           format = "file"),
tar_target(write_monthly_tp,
           loads_to_csv(list("lavaca" = daily_tp_08164000,
                             "texana" = daily_tp_texana,
                             "navidad" = daily_tp_08164390,
                             "sandy" = daily_tp_08164450,
                             "w_mustang" = daily_tp_08164503,
                             "e_mustang" = daily_tp_08164504),
                        df = "monthly",
                        output = "data/Output/monthly_loads/tp_monthly_loads.csv"),
           format = "file"),
tar_target(write_monthly_tp_fn,
           loads_to_csv(list("lavaca" = daily_tp_08164000_fn,
                             "texana" = daily_tp_texana_fn,
                             "navidad" = daily_tp_08164390_fn,
                             "sandy" = daily_tp_08164450_fn,
                             "w_mustang" = daily_tp_08164503_fn,
                             "e_mustang" = daily_tp_08164504_fn),
                        df = "monthly",
                        output = "data/Output/monthly_loads/tp_monthly_loads_flow_normalized.csv"),
           format = "file"),
tar_target(write_annual_tp,
           loads_to_csv(list("lavaca" = daily_tp_08164000,
                             "texana" = daily_tp_texana,
                             "navidad" = daily_tp_08164390,
                             "sandy" = daily_tp_08164450,
                             "w_mustang" = daily_tp_08164503,
                             "e_mustang" = daily_tp_08164504),
                        df = "annually",
                        output = "data/Output/annual_loads/tp_annual_loads.csv"),
           format = "file"),
tar_target(write_annual_tp_fn,
           loads_to_csv(list("lavaca" = daily_tp_08164000_fn,
                             "texana" = daily_tp_texana_fn,
                             "navidad" = daily_tp_08164390_fn,
                             "sandy" = daily_tp_08164450_fn,
                             "w_mustang" = daily_tp_08164503_fn,
                             "e_mustang" = daily_tp_08164504_fn),
                        df = "annually",
                        output = "data/Output/annual_loads/tp_annual_loads_flow_normalized.csv"),
           format = "file"),

tar_target(write_daily_no3,
loads_to_csv(list("lavaca" = tar_read(daily_no3_08164000),
"texana" = tar_read(daily_no3_texana),
"navidad" = tar_read(daily_no3_08164390),
"sandy" = tar_read(daily_no3_08164450),
"w_mustang" = tar_read(daily_no3_08164503),
"e_mustang" = tar_read(daily_no3_08164504)),
df = "daily",
output = "data/Output/daily_loads/no3_daily_loads.csv"),
format = "file"),
tar_target(write_daily_no3_fn,
loads_to_csv(list("lavaca" = tar_read(daily_no3_08164000_fn),
"texana" = tar_read(daily_no3_texana_fn),
"navidad" = tar_read(daily_no3_08164390_fn),
"sandy" = tar_read(daily_no3_08164450_fn),
"w_mustang" = tar_read(daily_no3_08164503_fn),
"e_mustang" = tar_read(daily_no3_08164504_fn)),
df = "daily",
output = "data/Output/daily_loads/no3_daily_loads_flow_normalized.csv"),
format = "file"),
tar_target(write_monthly_no3,
loads_to_csv(list("lavaca" = tar_read(daily_no3_08164000),
"texana" = tar_read(daily_no3_texana),
"navidad" = tar_read(daily_no3_08164390),
"sandy" = tar_read(daily_no3_08164450),
"w_mustang" = tar_read(daily_no3_08164503),
"e_mustang" = tar_read(daily_no3_08164504)),
df = "monthly",
output = "data/Output/monthly_loads/no3_monthly_loads.csv"),
format = "file"),
tar_target(write_monthly_no3_fn,
loads_to_csv(list("lavaca" = tar_read(daily_no3_08164000_fn),
"texana" = tar_read(daily_no3_texana_fn),
"navidad" = tar_read(daily_no3_08164390_fn),
"sandy" = tar_read(daily_no3_08164450_fn),
"w_mustang" = tar_read(daily_no3_08164503_fn),
"e_mustang" = tar_read(daily_no3_08164504_fn)),
df = "monthly",
output = "data/Output/monthly_loads/no3_monthly_loads_flow_normalized.csv"),
format = "file"),
tar_target(write_annual_no3,
loads_to_csv(list("lavaca" = tar_read(daily_no3_08164000),
"texana" = tar_read(daily_no3_texana),
"navidad" = tar_read(daily_no3_08164390),
"sandy" = tar_read(daily_no3_08164450),
"w_mustang" = tar_read(daily_no3_08164503),
"e_mustang" = tar_read(daily_no3_08164504)),
df = "annually",
output = "data/Output/annual_loads/no3_annual_loads.csv"),
format = "file"),
tar_target(write_annual_no3_flow_normalized,
loads_to_csv(list("lavaca" = tar_read(daily_no3_08164000_fn),
"texana" = tar_read(daily_no3_texana_fn),
"navidad" = tar_read(daily_no3_08164390_fn),
"sandy" = tar_read(daily_no3_08164450_fn),
"w_mustang" = tar_read(daily_no3_08164503_fn),
"e_mustang" = tar_read(daily_no3_08164504_fn)),
df = "annually",
output = "data/Output/annual_loads/no3_annual_loads_flow_normalized.csv"),
format = "file"),

# Loading estimates report: render the Quarto document (pdf, per the original
# note) as a pipeline target so it re-renders when its inputs change.
tar_quarto(loading_estimates, "reports/load_estimates/load_estimates.qmd"),
Expand Down
Loading

0 comments on commit 3a4f30e

Please sign in to comment.