From 56f51b8bab6f37be5572700ff9f986854ca1fe0c Mon Sep 17 00:00:00 2001
From: Soren Rasmussen
Date: Thu, 26 Sep 2024 18:14:30 -0600
Subject: [PATCH] Updating references to Derecho

---
 src/README.build.md                           | 100 +++++++-----
 tests/README.md                               |  33 +++---
 .../get_config_file_meta_data.py              |   2 +-
 tests/conftest.py                             |   6 +-
 tests/local/README.md                         |  12 +--
 tests/local/derecho/example_croton_test.sh    |   6 +-
 tests/local/derecho/model_test.sh             |  17 ++-
 tests/local/examples/ex2_run_conus_derecho.sh |  15 ++-
 tests/local/run_tests.py                      |  16 +--
 tests/utilities.py                            |  10 +-
 10 files changed, 93 insertions(+), 124 deletions(-)

diff --git a/src/README.build.md b/src/README.build.md
index de151a33c..480f44d97 100644
--- a/src/README.build.md
+++ b/src/README.build.md
@@ -1,75 +1,55 @@
-This README provides a brief description of how to build WRF-Hydro as a standalone
-modeling system.
-
-More details regarding the model as well as documentation and user guides can be found on
-the project website:
+# Standalone WRF-Hydro Build Instructions
+Details regarding the model as well as documentation and user guides can be
+found on the project website:
 https://ral.ucar.edu/projects/wrf_hydro

-Requirements:
-   * A supported Fortran compiler:
-     - PGI (pgfortran)
-     - Intel (ifort)
-     - GNU (gfortran)
-   * MPI libraries (MPICH or Open MPI)
-   * netCDF C & Fortran libraries version 4 or greater
+## Requirements
+* A supported Fortran compiler:
+  - GNU (gfortran)
+  - Cray (ftn)
+  - Intel (ifort)
+  - NVIDIA (nvfortran)
+* MPI libraries (MPICH or Open MPI)
+* NetCDF C & Fortran libraries v4.0+

-     Please note that these libraries need to be compiled with the same set of compilers
-     that will be used to compile WRF-Hydro
+Please note that these libraries need to be compiled with the same set of
+compilers that will be used to compile WRF-Hydro.
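+
+A quick way to verify this is to compare the compilers the NetCDF libraries
+report against the ones you intend to use (a sketch; assumes nc-config and
+nf-config are on your PATH):
+
+```
+$ nc-config --cc
+$ nf-config --fc
+```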

-Steps for building WRF-Hydro:
+## Building WRF-Hydro

 1. Obtain the source code

-   The source code for the latest WRF-Hydro release can be obtained here:
-   https://github.com/NCAR/wrf_hydro_nwm_public/releases/latest
-
-   Download and unpack the source code and navigate to the directory where you will
-   compile the code:
-
-   cd wrf_hydro*/src
-
-2. Set the required environment variables
-
-   Note: If the required netCDF libraries are not already available on your system
-   (these are commonly available as a module on high performance computing environments),
-   you will need to install them now.
-
-   First ensure the environment variables describing where your netCDF libraries live
-   are set appropriately so that the compiler can find them. For a bash shell the
-   following should work with <prefix> replaced by the installation prefix for your
-   netCDF libraries.
-
-   export NETCDF_INC="<prefix>/include"
-   export NETCDF_LIB="<prefix>/lib"
-
-   Then copy over the setEnvar.sh script from the 'template' directory and edit
-   the WRF-Hydro environment variables / compile time options in the file as needed.
-   This file can then be passed as an argument (see step #4) to the compile scripts
-   below which will source the environment variables for you. Alternatively these
-   environment variables can be set by the user.
+The source code for the latest WRF-Hydro release can be obtained here:
+https://github.com/NCAR/wrf_hydro_nwm_public/releases/latest

-3. Configure
+Download and unpack the source code and navigate to the directory where you
+will compile the code:

-   To configure the model run the following and select the appropriate option for
-   your system / compiler:
+```
+$ wget https://github.com/NCAR/wrf_hydro_nwm_public/archive/refs/tags/v5.4.0.tar.gz
+$ tar zxf v5.4.0.tar.gz
+$ cd wrf_hydro_nwm_public-5.4.0/src
+```

-   ./configure
+2. Compile using CMake

-4. Compile
+Create a build directory, configure with CMake, and then compile.
+See the [CMake Build documentation](https://github.com/NCAR/wrf_hydro_nwm_public/blob/main/docs/BUILD.md#cmake-build)
+for a table of WRF-Hydro-specific configuration options.
+The user can enable debug mode, nudging, etc. (see the example below).

-   To compile the model run the compile script with the name corresponding to the
-   land surface model (i.e. Noah or NoahMP) you would like to be utilized within
-   WRF-Hydro and pass the file containing the WRF-Hydro environment variables /
-   compile time options as an argument:
+```
+$ mkdir build
+$ cd build
+$ cmake ..
+$ make -j 4
+```

-   ./compile_offline_Noah.sh setEnvar.sh
-   OR
-   ./compile_offline_NoahMP.sh setEnvar.sh
+This should result in the creation of a 'Run' directory populated with the
+appropriate template parameter tables and namelists for the land surface model
+selected as well as a model executable that is then symlinked to wrf_hydro.exe.
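+
+For example, a debug build with streamflow nudging enabled might be
+configured as follows (a sketch; the option names are assumed from the
+BUILD.md table linked above):
+
+```
+$ cmake .. -DHYDRO_D=1 -DWRF_HYDRO_NUDGING=1
+```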

-   This should result in the creation of a 'Run' directory populated with the
-   appropriate template parameter tables and namelists for the land surface model
-   selected as well as a model executable that is then symlinked to wrf_hydro.exe.
+Note that CMake must be able to locate the NetCDF C and Fortran libraries.
+If they are not installed in a standard location, make sure your environment
+(e.g. the loaded modules) points to them before running cmake.

-   Note that, as mentioned above, passing the environment variable file as an argument
-   to the compile script is optional. However, if this is not passed the desired
-   environment variables must be set prior to running the compile script.
diff --git a/tests/README.md b/tests/README.md
index 91677aea3..2b9745825 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -83,7 +83,7 @@
 Examples are provided in `tests/local/examples`. At this time:

 ```
-james@vpn35[609]:~/WRF_Hydro/wrf_hydro_nwm_public/tests/local> python run_tests.py --help
+$ python run_tests.py --help
 usage: run_tests.py [-h] --config CONFIG [CONFIG ...] --compiler COMPILER
                     --output_dir OUTPUT_DIR --candidate_dir CANDIDATE_DIR
                     --reference_dir REFERENCE_DIR [--domain_dir DOMAIN_DIR]
@@ -120,13 +120,13 @@ optional arguments:
                         convention is that the exe is always named
                         wrf_hydro.exe.
   --ncores NCORES       Number of cores to use for testing
-  --scheduler           Scheduler to use for testing, options are PBSCheyenne
+  --scheduler           Scheduler to use for testing, options are PBSDerecho
                         or do not specify for no scheduler
   --nnodes NNODES       Number of nodes to use for testing if running on
                         scheduler
   --account ACCOUNT     Account number to use if using a scheduler.
   --walltime WALLTIME   Walltime to use if using a scheduler.
-  --queue QUEUE         Queue to use if running on NCAR Cheyenne, options are
+  --queue QUEUE         Queue to use if running on NCAR Derecho, options are
                         regular, premium, or shared
   --print               Print log to stdout instead of html
   --pdb                 pdb (debug) in pytest
@@ -141,8 +141,8 @@ optional arguments:
 ## Croton example
 Many docker-related details aside, this is essentially how the Croton
 Continuous-Integration domain is run inside a docker container:
 ```
-cd ~/wrf_hydro_nwm_public/tests/local
-python run_tests.py \
+$ cd ~/wrf_hydro_nwm_public/tests/local
+$ python run_tests.py \
     --config nwm_ana nwm_long_range reach gridded --compiler gfort \
     --output_dir /home/docker/test_out \
@@ -168,16 +168,15 @@ The following two environments come "ready to go":
 The two containers
 [`wrfhydro/dev:conda`](https://github.com/NCAR/wrf_hydro_docker/blob/main/dev/conda/Dockerfile)
 and
 [`wrfhydro/dev:modeltesting`](https://github.com/NCAR/wrf_hydro_docker/blob/main/dev/modeltesting/Dockerfile)
 contain the full software stack required to run testing.

-## Cheyenne
-To activate a common python virtual envionment for model testing on cheyenne:
+## Derecho
+To activate a common python virtual environment for model testing on Derecho:
 ```
-(368) jamesmcc@cheyenne3[999]:~> deactivate
-jamesmcc@cheyenne3[1000]:~> source /glade/p/cisl/nwc/model_testing_env/wrf_hydro_nwm_test/bin/activate
-(wrf_hydro_nwm_test) jamesmcc@cheyenne3[1001]:~>
+$ deactivate
+$ source /glade/p/cisl/nwc/model_testing_env/wrf_hydro_nwm_test/bin/activate
 ```
-Because Whole new levels of testing complexity open up on cheyenne, there is a
+Because whole new levels of testing complexity open up on Derecho, there is a
 special script to handle this with minimal pain:
-`test/local/cheyenne/model_test.sh`. This script provides flexibility to
+`tests/local/derecho/model_test.sh`. This script provides flexibility to
 switch compilers, MPI distributions, and domains. With MPI distributions,
 different model execution commands may be required. Furthermore, output
 comparison on large domains is better handled by `xrcmp` in `wrfhydropy`.

@@ -186,11 +185,9 @@
 # The Croton domain
 A lovely watershed with some very lovely lakes, I am sure as I hope to visit
 it some day. As a test domain, it has served us marvelously. To pull the domain
-from the cloud:
+from the WRF-Hydro release:
 ```
-cd /your/path/to/wrf_hydro_nwm_public/tests/local/utils
-python gdrive_download.py --file_id 1xFYB--zm9f8bFHESzgP5X5i7sZryQzJe --dest_file ~/croton_NY.tar.gz
-cd ~
-tar xzf croton_NY.tar.gz
-mv example_case croton_NY ## we thought the generic name would be useful.
+$ wget https://github.com/NCAR/wrf_hydro_nwm_public/releases/download/v5.4.0/croton_NY_training_example_v5.4.tar.gz
+$ tar zxf croton_NY_training_example_v5.4.tar.gz
+$ mv example_case croton_NY ## we thought the generic name would be useful.
 ```
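+
+The unpacked domain can then be handed to the test runner via `--domain_dir`
+(a sketch with placeholder paths; the other required arguments are the ones
+shown in the --help text above):
+
+```
+$ python run_tests.py \
+    --config gridded --compiler gfort \
+    --candidate_dir path/to/wrf_hydro_nwm_public \
+    --reference_dir path/to/wrf_hydro_nwm_public_REFERENCE \
+    --output_dir path/to/test_out \
+    --domain_dir ~/croton_NY
+```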
diff --git a/tests/config_file_meta/get_config_file_meta_data.py b/tests/config_file_meta/get_config_file_meta_data.py
index ba3537b5e..29bff9b02 100644
--- a/tests/config_file_meta/get_config_file_meta_data.py
+++ b/tests/config_file_meta/get_config_file_meta_data.py
@@ -7,7 +7,7 @@
 # Example: python get_config_file_meta_data
 #
 # 1) Run script file in-place, in this directory, as above.
-# 2) Typically, this will be run on cheyenne since that's where CONUS domain
+# 2) Typically, this will be run on Derecho since that's where CONUS domain
 #    files will live.
 # 3) Configure the list of domain paths and configs below.
 # 4) The dirs in this directory must be removed to be refreshed.
diff --git a/tests/conftest.py b/tests/conftest.py
index 3c7d9079c..35bfa1892 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -74,7 +74,7 @@ def pytest_addoption(parser):
     parser.addoption(
         '--scheduler',
         action='store_true',
-        help='Use PBS scheduler on cheyenne'
+        help='Use PBS scheduler on Derecho'
     )

     parser.addoption(
@@ -105,7 +105,7 @@ def pytest_addoption(parser):
         default='regular',
         required=False,
         action='store',
-        help='Queue to use if running on NCAR Cheyenne, options are regular, '
+        help='Queue to use if running on NCAR Derecho, options are regular, '
              'premium, or shared'
     )

@@ -170,7 +170,7 @@ def _make_sim(

     if scheduler:
         sim.add(
-            wrfhydropy.schedulers.PBSCheyenne(
+            wrfhydropy.schedulers.PBSDerecho(
                 account=account,
                 nproc=int(ncores),
                 nnodes=int(nnodes),
diff --git a/tests/local/README.md b/tests/local/README.md
index c74c1db56..01b47e2ad 100644
--- a/tests/local/README.md
+++ b/tests/local/README.md
@@ -1,18 +1,16 @@
 tests/local
 ===========
-
-# Purpose
-Where local testing happens.
+Where local testing happens.

 # run_tests.py
-This is the commandline interface to pytest and running the tests.
+This is the command-line interface to pytest and running the tests.
 `python run_tests.py --help` for more information

 See ../README.md for more context.

 # requirements.txt
-These are essentially the requirements of wrf\_hydro\_py repeated here for minimal confusion.
+These are essentially the requirements of wrf\_hydro\_py repeated here for minimal confusion.
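+
+They can be installed into the active Python environment with pip (assuming
+pip is available and this directory is the working directory):
+
+```
+$ pip install -r requirements.txt
+```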

-# cheyenne
-Scripting for cheyenne, including scheduling. Examples in the help.
+# Derecho
+Scripting for Derecho, including scheduling. Examples in the help.
diff --git a/tests/local/derecho/example_croton_test.sh b/tests/local/derecho/example_croton_test.sh
index eb7cb4540..5c1f5cc19 100755
--- a/tests/local/derecho/example_croton_test.sh
+++ b/tests/local/derecho/example_croton_test.sh
@@ -1,14 +1,14 @@
 #!/bin/bash

 ./model_test.sh \
-    -c /glade/u/home/jamesmcc/WRF_Hydro/wrf_hydro_nwm_public \
-    -r /glade/u/home/jamesmcc/WRF_Hydro/.wrf_hydro_nwm_public_REFERENCE \
+    -c path/to/wrf_hydro_nwm_public \
+    -r path/to/wrf_hydro_nwm_public_REFERENCE \
     --compiler=ifort \
     --mpi=impi \
     --config='nwm_ana' \
     --ncores=6 --queue=share \
     --reference_update=false \
-    --domain_dir /glade/work/jamesmcc/domains/public/croton_NY
+    --domain_dir path/to/croton_NY

 # Can be added
 # --use_existing_test_dir
diff --git a/tests/local/derecho/model_test.sh b/tests/local/derecho/model_test.sh
index 1f193d03e..0f07ceca8 100755
--- a/tests/local/derecho/model_test.sh
+++ b/tests/local/derecho/model_test.sh
@@ -1,7 +1,5 @@
 #!/bin/bash
-# A first productiony test script for cheyenne.
-# James McCreight
-# April 5, 2019
+# A first production-ready test script for Derecho.
 # Why bash?
 # 1) might need to establish the python env from scratch,
 # 2) setting the modules is much easier
@@ -45,9 +43,8 @@ Options:
   --walltime: default=01:00:00
   --domain_dir:
-      default=/glade/work/jamesmcc/domains/private/CONUS
       The domain must be properly constructed with domain-side json namelist patch files and a
-      .version file. Most domains in /glade/work/jamesmcc/domains fit that criteria.
+      .version file.
   --use_existing_test_dir
       default is not included. Lets testing proceed with existing output in place.
   --xrcmp_n_cores
@@ -71,7 +68,7 @@ Usage Examples:
     --compiler=ifort --mpi=impi \\
     --config='nwm_ana nwm_long_range gridded reach' \\
     --ncores=6 --queue=share \\
-    --domain_dir /glade/work/jamesmcc/domains/public/croton_NY
+    --domain_dir path/to/croton_NY

 # Non-standard mpi/exe_cmd example. A croton test of nwm_ana configuration.
 # A strenuous exercise in bash escaping.
@@ -82,7 +79,7 @@
     --exe_cmd=\"mpiexec_mpt $'\\\$(hostname)' -np \\\$ncores ./wrf_hydro.exe\" \\
     --config='nwm_ana' \\
     --ncores=6 --queue=share \\
-    --domain_dir /glade/work/jamesmcc/domains/public/croton_NY
+    --domain_dir path/to/croton_NY
 "

 ## Default options
 queue=regular
 account=NRAL0017
 walltime=01:00:00
 reference_update=true
-domain_dir=/glade/work/jamesmcc/domains/private/CONUS
+domain_dir=path/to/CONUS
 use_existing_test_dir=''
 xrcmp_n_cores=0
@@ -196,8 +193,8 @@ if [[ $reference_update == 'true' ]]; then
     echo; echo
     printf "\e[7;49;94mUpdate the reference repository with NCAR/$branch_name\e[0m\n"
     cd $reference_dir || exit 9
-    if [[ `hostname` != *cheyenne* ]]; then
-        ssh cheyenne1 "cd $reference_dir && git fetch upstream" || exit 9
+    if [[ `hostname` != *derecho* ]]; then
+        ssh derecho1 "cd $reference_dir && git fetch upstream" || exit 9
     else
         cd $reference_dir || exit 9
         git fetch upstream || exit 9
diff --git a/tests/local/examples/ex2_run_conus_derecho.sh b/tests/local/examples/ex2_run_conus_derecho.sh
index 73bd51d6e..812ac4073 100644
--- a/tests/local/examples/ex2_run_conus_derecho.sh
+++ b/tests/local/examples/ex2_run_conus_derecho.sh
@@ -1,16 +1,13 @@
 #!/usr/bin/env bash

-# These paths point to a preconfigured conda environment. Likewise, you can install your own
-# using the utils/nwm_testing.yml conda environment file.
-export PATH="/glade/work/jmills/nwm_testing/miniconda3/envs/nwm_testing/bin:$PATH"
-export PYTHONPATH="/glade/work/jmills/nwm_testing/miniconda3/envs/nwm_testing/lib/python3.7/site-packages"
+# Load conda environment
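+# A sketch, assuming a conda installation at a placeholder path and an
+# environment built from utils/nwm_testing.yml:
+#   source path/to/miniconda3/etc/profile.d/conda.sh
+#   conda activate nwm_testing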

-# Run the tests using a scheduler with default nnodes of 6 and ncores of 216
-python /glade/scratch/$USER/wrf_hydro_nwm_public/tests/local/run_tests.py \
+# Run the tests using a scheduler
+python /glade/derecho/scratch/$USER/wrf_hydro_nwm_public/tests/local/run_tests.py \
 --config nwm_ana nwm_long_range gridded reach \
 --compiler ifort \
---output_dir /glade/scratch/$USER/test_out \
---candidate_dir /glade/scratch/$USER/wrf_hydro_nwm_public/ \
---reference_dir /glade/scratch/$USER/wrf_hydro_nwm_public/ \
---domain_dir /glade/work/jamesmcc/domains/private/CONUS \
+--output_dir /glade/derecho/scratch/$USER/test_out \
+--candidate_dir /glade/derecho/scratch/$USER/wrf_hydro_nwm_public/ \
+--reference_dir /glade/derecho/scratch/$USER/wrf_hydro_nwm_public/ \
+--domain_dir path/to/CONUS_domain \
 --scheduler
diff --git a/tests/local/run_tests.py b/tests/local/run_tests.py
index 1703456b5..0b47742ea 100755
--- a/tests/local/run_tests.py
+++ b/tests/local/run_tests.py
@@ -40,11 +40,11 @@ def run_tests(
         candidate_dir: The wrf-hydro code candidate directory to use, e.g. wrf_hydro_nwm_public
         reference_dir: The wrf-hydro code directory to use, e.g. wrf_hydro_nwm_public
         output_dir: The directory to hold test outputs
-        scheduler: Use PBSCheyenne scheduler?
+        scheduler: Use PBSDerecho scheduler?
         exe_cmd: Optional. The MPI-dependent run command, in which the zeroth variable is ncores.
-        ncores: Optional. The number of cores to use if running on cheyenne
-        nnodes: Optional. The number of nodes to use if running on cheyenne
-        account: Options. The account number to use if running on cheyenne
+        ncores: Optional. The number of cores to use if running on Derecho
+        nnodes: Optional. The number of nodes to use if running on Derecho
+        account: Optional. The account number to use if running on Derecho
         walltime: Optional. Walltime for scheduler
         queue: Optional, queue to use for scheduler
         print_log: Optional, print text logs instead of HTML logs
@@ -57,10 +57,10 @@ def run_tests(
     candidate_source_dir = candidate_dir + '/src'
     reference_source_dir = reference_dir + '/src'

-    # Load modules and override nnodes/ncores if running on cheyenne
+    # Load modules and override nnodes/ncores if running on Derecho
     hostname = socket.gethostname()
     module_cmd = ''
-    if 'cheyenne' in hostname:
+    if 'derecho' in hostname:
         module_cmd = 'echo; echo "Using the following modules for testing:" ; module list; echo;'

     # HTML report
@@ -199,7 +199,7 @@ def main():
         '--scheduler',
         required=False,
         action='store_true',
-        help='Scheduler to use for testing, options are PBSCheyenne or do not '
+        help='Scheduler to use for testing, options are PBSDerecho or do not '
              'specify for no scheduler'
     )

@@ -231,7 +231,7 @@ def main():
         default='regular',
         required=False,
         action='store',
-        help='Queue to use if running on NCAR Cheyenne, options are regular, '
+        help='Queue to use if running on NCAR Derecho, options are regular, '
              'premium, or shared'
     )

diff --git a/tests/utilities.py b/tests/utilities.py
index ef9800584..5ecb5c6bd 100644
--- a/tests/utilities.py
+++ b/tests/utilities.py
@@ -131,11 +131,11 @@ def make_sim(domain_dir,
         pass

     if scheduler:
-        sim.add(schedulers.PBSCheyenne(account=account,
-                                       nproc=int(ncores),
-                                       nnodes=int(nnodes),
-                                       walltime=walltime,
-                                       queue=queue))
+        sim.add(schedulers.PBSDerecho(account=account,
+                                      nproc=int(ncores),
+                                      nnodes=int(nnodes),
+                                      walltime=walltime,
+                                      queue=queue))

     # Channel and bucket mode is forc_typ = 10.
     if channel_only: