diff --git a/CMakeLists.txt b/CMakeLists.txt index ed34916b3..125c17fa9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,7 +40,40 @@ if(NOT PARALLEL_NETCDF) endif() if(MOVING_NEST) - list(APPEND _fv3atm_defs_private MOVING_NEST) + list(APPEND _fv3atm_defs_private MOVING_NEST MOIST_CAPPA USE_COND) + if(DEBUG) + list(APPEND _fv3atm_defs_private DEBUG) + endif() + if(GFS_PHYS) + list(APPEND _fv3atm_defs_private GFS_PHYS) + endif() + if(GFS_TYPES) + list(APPEND _fv3atm_defs_private GFS_TYPES) + endif() + if(USE_GFSL63) + list(APPEND _fv3atm_defs_private USE_GFSL63) + endif() + if(INTERNAL_FILE_NML) + list(APPEND _fv3atm_defs_private INTERNAL_FILE_NML) + endif() + if(ENABLE_QUAD_PRECISION) + list(APPEND _fv3atm_defs_private ENABLE_QUAD_PRECISION) + endif() + if(32BIT) + list(APPEND _fv3atm_defs_private OVERLOAD_R4 OVERLOAD_R8) + endif() + + list(APPEND moving_nest_srcs + moving_nest/bounding_box.F90 + moving_nest/fv_tracker.F90 + moving_nest/fv_moving_nest.F90 + moving_nest/fv_moving_nest_main.F90 + moving_nest/fv_moving_nest_physics.F90 + moving_nest/fv_moving_nest_types.F90 + moving_nest/fv_moving_nest_utils.F90 + ) +else() + list(APPEND moving_nest_srcs "") endif() add_library(fv3atm @@ -57,6 +90,7 @@ add_library(fv3atm io/module_fv3_io_def.F90 io/module_write_internal_state.F90 io/module_wrt_grid_comp.F90 + ${moving_nest_srcs} ${POST_SRC} ) diff --git a/atmos_cubed_sphere b/atmos_cubed_sphere index 2cdf37bc9..a839395d5 160000 --- a/atmos_cubed_sphere +++ b/atmos_cubed_sphere @@ -1 +1 @@ -Subproject commit 2cdf37bc9c17517fa07936585130f722fdb4a757 +Subproject commit a839395d542ab860ae0afa7601e35b37d68d773e diff --git a/atmos_model.F90 b/atmos_model.F90 index 35e7328ac..81a5272c3 100644 --- a/atmos_model.F90 +++ b/atmos_model.F90 @@ -103,7 +103,11 @@ module atmos_model_mod block_data_combine_fractions #ifdef MOVING_NEST -use fv_moving_nest_main_mod, only: update_moving_nest, dump_moving_nest +use fv_moving_nest_main_mod, only: update_moving_nest, dump_moving_nest +use fv_moving_nest_main_mod, only: nest_tracker_init +use fv_moving_nest_main_mod, only: moving_nest_end, nest_tracker_end +use fv_moving_nest_types_mod, only: fv_moving_nest_init +use fv_tracker_mod, only: check_is_moving_nest, execute_tracker #endif !----------------------------------------------------------------------- @@ -132,6 +136,7 @@ module atmos_model_mod logical :: nested ! true if there is a nest logical :: moving_nest_parent ! true if this grid has a moving nest child logical :: is_moving_nest ! true if this is a moving nest grid + logical :: isAtCapTime ! true if currTime is at the cap driverClock's currTime integer :: ngrids ! integer :: mygrid ! integer :: mlon, mlat @@ -296,7 +301,7 @@ subroutine update_atmos_radiation_physics (Atmos) ! receives coupled fields through the above assign_importdata step. Thus, ! an extra step is needed to fill the coupling variables in the nest, ! by downscaling the coupling variables from its parent. - if (Atmos%ngrids > 1) then + if (Atmos%isAtCapTime .and. Atmos%ngrids > 1) then if (GFS_control%cplocn2atm .or. GFS_control%cplwav2atm) then call atmosphere_fill_nest_cpl(Atm_block, GFS_control, GFS_data) endif @@ -540,6 +545,7 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step) !---- set the atmospheric model time ------ + Atmos % isAtCapTime = .false. 
Atmos % Time_init = Time_init Atmos % Time = Time Atmos % Time_step = Time_step @@ -552,14 +558,21 @@ subroutine atmos_model_init (Atmos, Time_init, Time, Time_step) !---------- (need name of CCPP suite definition file from input.nml) --------- call atmosphere_init (Atmos%Time_init, Atmos%Time, Atmos%Time_step,& Atmos%grid, Atmos%area) - +#ifdef MOVING_NEST + call fv_moving_nest_init(Atm, mygrid) + call nest_tracker_init() +#endif !----------------------------------------------------------------------- call atmosphere_resolution (nlon, nlat, global=.false.) call atmosphere_resolution (mlon, mlat, global=.true.) call atmosphere_domain (Atmos%domain, Atmos%domain_for_read, Atmos%layout, & Atmos%regional, Atmos%nested, & - Atmos%moving_nest_parent, Atmos%is_moving_nest, & Atmos%ngrids, Atmos%mygrid, Atmos%pelist) + Atmos%moving_nest_parent = .false. + Atmos%is_moving_nest = .false. +#ifdef MOVING_NEST + call check_is_moving_nest(Atm, Atmos%mygrid, Atmos%ngrids, Atmos%is_moving_nest, Atmos%moving_nest_parent) +#endif call atmosphere_diag_axes (Atmos%axes) call atmosphere_etalvls (Atmos%ak, Atmos%bk, flip=flip_vc) @@ -929,6 +942,9 @@ subroutine update_atmos_model_state (Atmos, rc) call mpp_clock_begin(fv3Clock) call mpp_clock_begin(updClock) call atmosphere_state_update (Atmos%Time, GFS_data, IAU_Data, Atm_block, flip_vc) +#ifdef MOVING_NEST + call execute_tracker(Atm, mygrid, Atmos%Time, Atmos%Time_step) +#endif call mpp_clock_end(updClock) call mpp_clock_end(fv3Clock) @@ -1031,6 +1047,14 @@ subroutine atmos_model_end (Atmos) !----------------------------------------------------------------------- !---- termination routine for atmospheric model ---- +#ifdef MOVING_NEST + ! Call this before atmosphere_end(), because that deallocates Atm + if (Atmos%is_moving_nest) then + call moving_nest_end() + call nest_tracker_end() + endif +#endif + call atmosphere_end (Atmos % Time, Atmos%grid, restart_endfcst) if(restart_endfcst) then diff --git a/ccpp/data/GFS_typedefs.F90 b/ccpp/data/GFS_typedefs.F90 index 41741d236..fdbeddb9b 100644 --- a/ccpp/data/GFS_typedefs.F90 +++ b/ccpp/data/GFS_typedefs.F90 @@ -1180,6 +1180,7 @@ module GFS_typedefs real(kind=kind_phys) :: rlmx !< maximum allowed mixing length in boundary layer mass flux scheme real(kind=kind_phys) :: elmx !< maximum allowed dissipation mixing length in boundary layer mass flux scheme integer :: sfc_rlm !< choice of near surface mixing length in boundary layer mass flux scheme + integer :: tc_pbl !< control for TC applications in the PBL scheme !--- parameters for canopy heat storage (CHS) parameterization real(kind=kind_phys) :: h0facu !< CHS factor for sensible heat flux in unstable surface layer @@ -3363,6 +3364,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & real(kind=kind_phys) :: rlmx = 300. !< maximum allowed mixing length in boundary layer mass flux scheme real(kind=kind_phys) :: elmx = 300. !< maximum allowed dissipation mixing length in boundary layer mass flux scheme integer :: sfc_rlm = 0 !< choice of near surface mixing length in boundary layer mass flux scheme + integer :: tc_pbl = 0 !< control for TC applications in the PBL scheme !--- parameters for canopy heat storage (CHS) parameterization real(kind=kind_phys) :: h0facu = 0.25 @@ -3566,7 +3568,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & thsfc_loc, & ! 
vertical diffusion xkzm_m, xkzm_h, xkzm_s, xkzminv, moninq_fac, dspfac, & - bl_upfr, bl_dnfr, rlmx, elmx, sfc_rlm, & + bl_upfr, bl_dnfr, rlmx, elmx, sfc_rlm, tc_pbl, & !--- canopy heat storage parameterization h0facu, h0facs, & !--- cellular automata @@ -4380,6 +4382,7 @@ subroutine control_initialize (Model, nlunit, fn_nml, me, master, & Model%rlmx = rlmx Model%elmx = elmx Model%sfc_rlm = sfc_rlm + Model%tc_pbl = tc_pbl !--- canopy heat storage parametrization Model%h0facu = h0facu @@ -6067,6 +6070,7 @@ subroutine control_print(Model) print *, ' rlmx : ', Model%rlmx print *, ' elmx : ', Model%elmx print *, ' sfc_rlm : ', Model%sfc_rlm + print *, ' tc_pbl : ', Model%tc_pbl print *, ' ' print *, 'parameters for canopy heat storage parametrization' print *, ' h0facu : ', Model%h0facu diff --git a/ccpp/data/GFS_typedefs.meta b/ccpp/data/GFS_typedefs.meta index 42d5ef29e..38be8e94e 100644 --- a/ccpp/data/GFS_typedefs.meta +++ b/ccpp/data/GFS_typedefs.meta @@ -5001,6 +5001,12 @@ units = none dimensions = () type = integer +[tc_pbl] + standard_name = control_for_TC_applications_in_the_PBL_scheme + long_name = control for TC applications in the PBL scheme + units = none + dimensions = () + type = integer [h0facu] standard_name = multiplicative_tuning_parameter_for_reduced_surface_heat_fluxes_due_to_canopy_heat_storage long_name = canopy heat storage factor for sensible heat flux in unstable surface layer diff --git a/ccpp/physics b/ccpp/physics index befc3364d..688c7715a 160000 --- a/ccpp/physics +++ b/ccpp/physics @@ -1 +1 @@ -Subproject commit befc3364d35172cc6133a5b0f883e9d2c0bac5a2 +Subproject commit 688c7715a026b1faeba8fc2350ddcd7a51b3cabf diff --git a/ccpp/suites/suite_FV3_HAFS_v0_gfdlmp_tedmf.xml b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml similarity index 98% rename from ccpp/suites/suite_FV3_HAFS_v0_gfdlmp_tedmf.xml rename to ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml index cb94cc03a..1760afe97 100644 --- a/ccpp/suites/suite_FV3_HAFS_v0_gfdlmp_tedmf.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf.xml @@ -1,6 +1,6 @@ - + diff --git a/ccpp/suites/suite_FV3_HAFS_v0_gfdlmp_tedmf_nonsst.xml b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml similarity index 98% rename from ccpp/suites/suite_FV3_HAFS_v0_gfdlmp_tedmf_nonsst.xml rename to ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml index c588d8598..90693645c 100644 --- a/ccpp/suites/suite_FV3_HAFS_v0_gfdlmp_tedmf_nonsst.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_gfdlmp_tedmf_nonsst.xml @@ -1,6 +1,6 @@ - + diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml new file mode 100644 index 000000000..0e7127bb4 --- /dev/null +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson.xml @@ -0,0 +1,91 @@ + + + + + + + GFS_time_vary_pre + GFS_rrtmg_setup + GFS_rad_time_vary + GFS_phys_time_vary + + + + + GFS_suite_interstitial_rad_reset + GFS_rrtmg_pre + GFS_radiation_surface + rad_sw_pre + rrtmg_sw + rrtmg_sw_post + rrtmg_lw_pre + rrtmg_lw + rrtmg_lw_post + GFS_rrtmg_post + + + + + GFS_suite_interstitial_phys_reset + GFS_suite_stateout_reset + get_prs_fv3 + GFS_suite_interstitial_1 + GFS_surface_generic_pre + GFS_surface_composites_pre + dcyc2t3 + GFS_surface_composites_inter + GFS_suite_interstitial_2 + + + + sfc_diff + GFS_surface_loop_control_part1 + sfc_nst_pre + sfc_nst + sfc_nst_post + lsm_noah + sfc_sice + GFS_surface_loop_control_part2 + + + + GFS_surface_composites_post + sfc_diag + sfc_diag_post + GFS_surface_generic_post + GFS_PBL_generic_pre + satmedmfvdifq + GFS_PBL_generic_post + 
GFS_GWD_generic_pre + cires_ugwp + cires_ugwp_post + GFS_GWD_generic_post + GFS_suite_stateout_update + ozphys_2015 + h2ophys + get_phi_fv3 + GFS_suite_interstitial_3 + GFS_DCNV_generic_pre + samfdeepcnv + GFS_DCNV_generic_post + GFS_SCNV_generic_pre + samfshalcnv + GFS_SCNV_generic_post + GFS_suite_interstitial_4 + cnvc90 + GFS_MP_generic_pre + mp_thompson_pre + mp_thompson + mp_thompson_post + GFS_MP_generic_post + maximum_hourly_diagnostics + phys_tend + + + + + GFS_stochastics + + + + diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson_noahmp.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson_noahmp.xml new file mode 100644 index 000000000..8d3bab273 --- /dev/null +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson_noahmp.xml @@ -0,0 +1,91 @@ + + + + + + + GFS_time_vary_pre + GFS_rrtmg_setup + GFS_rad_time_vary + GFS_phys_time_vary + + + + + GFS_suite_interstitial_rad_reset + GFS_rrtmg_pre + GFS_radiation_surface + rad_sw_pre + rrtmg_sw + rrtmg_sw_post + rrtmg_lw_pre + rrtmg_lw + rrtmg_lw_post + GFS_rrtmg_post + + + + + GFS_suite_interstitial_phys_reset + GFS_suite_stateout_reset + get_prs_fv3 + GFS_suite_interstitial_1 + GFS_surface_generic_pre + GFS_surface_composites_pre + dcyc2t3 + GFS_surface_composites_inter + GFS_suite_interstitial_2 + + + + sfc_diff + GFS_surface_loop_control_part1 + sfc_nst_pre + sfc_nst + sfc_nst_post + noahmpdrv + sfc_sice + GFS_surface_loop_control_part2 + + + + GFS_surface_composites_post + sfc_diag + sfc_diag_post + GFS_surface_generic_post + GFS_PBL_generic_pre + satmedmfvdifq + GFS_PBL_generic_post + GFS_GWD_generic_pre + cires_ugwp + cires_ugwp_post + GFS_GWD_generic_post + GFS_suite_stateout_update + ozphys_2015 + h2ophys + get_phi_fv3 + GFS_suite_interstitial_3 + GFS_DCNV_generic_pre + samfdeepcnv + GFS_DCNV_generic_post + GFS_SCNV_generic_pre + samfshalcnv + GFS_SCNV_generic_post + GFS_suite_interstitial_4 + cnvc90 + GFS_MP_generic_pre + mp_thompson_pre + mp_thompson + mp_thompson_post + GFS_MP_generic_post + maximum_hourly_diagnostics + phys_tend + + + + + GFS_stochastics + + + + diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson_noahmp_nonsst.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson_noahmp_nonsst.xml new file mode 100644 index 000000000..b61422021 --- /dev/null +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson_noahmp_nonsst.xml @@ -0,0 +1,89 @@ + + + + + + + GFS_time_vary_pre + GFS_rrtmg_setup + GFS_rad_time_vary + GFS_phys_time_vary + + + + + GFS_suite_interstitial_rad_reset + GFS_rrtmg_pre + GFS_radiation_surface + rad_sw_pre + rrtmg_sw + rrtmg_sw_post + rrtmg_lw_pre + rrtmg_lw + rrtmg_lw_post + GFS_rrtmg_post + + + + + GFS_suite_interstitial_phys_reset + GFS_suite_stateout_reset + get_prs_fv3 + GFS_suite_interstitial_1 + GFS_surface_generic_pre + GFS_surface_composites_pre + dcyc2t3 + GFS_surface_composites_inter + GFS_suite_interstitial_2 + + + + sfc_diff + GFS_surface_loop_control_part1 + sfc_ocean + noahmpdrv + sfc_sice + GFS_surface_loop_control_part2 + + + + GFS_surface_composites_post + sfc_diag + sfc_diag_post + GFS_surface_generic_post + GFS_PBL_generic_pre + satmedmfvdifq + GFS_PBL_generic_post + GFS_GWD_generic_pre + cires_ugwp + cires_ugwp_post + GFS_GWD_generic_post + GFS_suite_stateout_update + ozphys_2015 + h2ophys + get_phi_fv3 + GFS_suite_interstitial_3 + GFS_DCNV_generic_pre + samfdeepcnv + GFS_DCNV_generic_post + GFS_SCNV_generic_pre + samfshalcnv + GFS_SCNV_generic_post + GFS_suite_interstitial_4 + cnvc90 + GFS_MP_generic_pre + mp_thompson_pre + mp_thompson + mp_thompson_post + GFS_MP_generic_post + maximum_hourly_diagnostics + phys_tend 
+ + + + + GFS_stochastics + + + + diff --git a/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml new file mode 100644 index 000000000..0cb393317 --- /dev/null +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson_nonsst.xml @@ -0,0 +1,89 @@ + + + + + + + GFS_time_vary_pre + GFS_rrtmg_setup + GFS_rad_time_vary + GFS_phys_time_vary + + + + + GFS_suite_interstitial_rad_reset + GFS_rrtmg_pre + GFS_radiation_surface + rad_sw_pre + rrtmg_sw + rrtmg_sw_post + rrtmg_lw_pre + rrtmg_lw + rrtmg_lw_post + GFS_rrtmg_post + + + + + GFS_suite_interstitial_phys_reset + GFS_suite_stateout_reset + get_prs_fv3 + GFS_suite_interstitial_1 + GFS_surface_generic_pre + GFS_surface_composites_pre + dcyc2t3 + GFS_surface_composites_inter + GFS_suite_interstitial_2 + + + + sfc_diff + GFS_surface_loop_control_part1 + sfc_ocean + lsm_noah + sfc_sice + GFS_surface_loop_control_part2 + + + + GFS_surface_composites_post + sfc_diag + sfc_diag_post + GFS_surface_generic_post + GFS_PBL_generic_pre + satmedmfvdifq + GFS_PBL_generic_post + GFS_GWD_generic_pre + cires_ugwp + cires_ugwp_post + GFS_GWD_generic_post + GFS_suite_stateout_update + ozphys_2015 + h2ophys + get_phi_fv3 + GFS_suite_interstitial_3 + GFS_DCNV_generic_pre + samfdeepcnv + GFS_DCNV_generic_post + GFS_SCNV_generic_pre + samfshalcnv + GFS_SCNV_generic_post + GFS_suite_interstitial_4 + cnvc90 + GFS_MP_generic_pre + mp_thompson_pre + mp_thompson + mp_thompson_post + GFS_MP_generic_post + maximum_hourly_diagnostics + phys_tend + + + + + GFS_stochastics + + + + diff --git a/ccpp/suites/suite_FV3_HAFS_v0_thompson_tedmf_gfdlsf.xml b/ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml similarity index 98% rename from ccpp/suites/suite_FV3_HAFS_v0_thompson_tedmf_gfdlsf.xml rename to ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml index 3e35b94a8..e68c9e687 100644 --- a/ccpp/suites/suite_FV3_HAFS_v0_thompson_tedmf_gfdlsf.xml +++ b/ccpp/suites/suite_FV3_HAFS_v1_thompson_tedmf_gfdlsf.xml @@ -1,6 +1,6 @@ - + diff --git a/module_fcst_grid_comp.F90 b/module_fcst_grid_comp.F90 index fb98dcd66..0fc3fff42 100644 --- a/module_fcst_grid_comp.F90 +++ b/module_fcst_grid_comp.F90 @@ -1157,6 +1157,9 @@ subroutine fcst_run_phase_1(fcst_comp, importState, exportState,clock,rc) ! !*** local variables ! + logical,save :: first=.true. + integer,save :: dt_cap=0 + type(ESMF_Time) :: currTime,stopTime integer :: mype, seconds real(kind=8) :: mpi_wtime, tbeg1 ! @@ -1174,6 +1177,22 @@ subroutine fcst_run_phase_1(fcst_comp, importState, exportState,clock,rc) call get_time(Atmos%Time - Atmos%Time_init, seconds) n_atmsteps = seconds/dt_atmos + + if (first) then + call ESMF_ClockGet(clock, currTime=currTime, stopTime=stopTime, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + call ESMF_TimeIntervalGet(stopTime-currTime, s=dt_cap, rc=rc) + if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return + + first=.false. + endif + + if ( dt_cap > 0 .and. mod(seconds, dt_cap) == 0 ) then + Atmos%isAtCapTime = .true. + else + Atmos%isAtCapTime = .false. + endif ! !----------------------------------------------------------------------- ! 
*** call fcst integration subroutines diff --git a/moving_nest/bounding_box.F90 b/moving_nest/bounding_box.F90 new file mode 100644 index 000000000..b2eab8b78 --- /dev/null +++ b/moving_nest/bounding_box.F90 @@ -0,0 +1,136 @@ +!*********************************************************************** +!* GNU General Public License * +!* This file is a part of fvGFS. * +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides subroutines for grid bounding boxes for moving nest +!! @author W. Ramstrom, AOML/HRD 07/28/2021 +!! @email William.Ramstrom@noaa.gov +!=======================================================================! + + +module bounding_box_mod + use mpp_domains_mod, only : mpp_get_C2F_index, nest_domain_type + use mpp_mod, only : mpp_pe + use fv_arrays_mod, only : R_GRID + +#ifdef GFS_TYPES + use GFS_typedefs, only : kind_phys +#else + use IPD_typedefs, only : kind_phys => IPD_kind_phys +#endif + + ! Simple aggregation of the start and end indices of a 2D grid + ! 
Makes argument lists clearer to read + type bbox + integer :: is, ie, js, je + end type bbox + + interface fill_bbox + module procedure fill_bbox_r4_2d + module procedure fill_bbox_r4_3d + module procedure fill_bbox_r4_4d + module procedure fill_bbox_r8_2d + module procedure fill_bbox_r8_3d + module procedure fill_bbox_r8_4d + end interface fill_bbox + +contains + + subroutine fill_bbox_r4_2d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*4, allocatable, intent(in) :: in_grid(:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r4_2d + + + subroutine fill_bbox_r4_3d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*4, allocatable, intent(in) :: in_grid(:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r4_3d + + subroutine fill_bbox_r4_4d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*4, allocatable, intent(in) :: in_grid(:,:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r4_4d + + + subroutine fill_bbox_r8_2d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*8, allocatable, intent(in) :: in_grid(:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r8_2d + + subroutine fill_bbox_r8_3d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*8, allocatable, intent(in) :: in_grid(:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r8_3d + + + subroutine fill_bbox_r8_4d(out_bbox, in_grid) + type(bbox), intent(out) :: out_bbox + real*8, allocatable, intent(in) :: in_grid(:,:,:,:) + + out_bbox%is = lbound(in_grid, 1) + out_bbox%ie = ubound(in_grid, 1) + out_bbox%js = lbound(in_grid, 2) + out_bbox%je = ubound(in_grid, 2) + end subroutine fill_bbox_r8_4d + + + !>@brief This subroutine returns the nest grid indices that correspond to the input nest domain, direction, and position + !>@details Simplifies the call signature with the bbox type rather than 4 separate integers + subroutine bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + implicit none + type(nest_domain_type), intent(in) :: nest_domain + type(bbox), intent(out) :: bbox_fine, bbox_coarse + integer, intent(in) :: direction, position + + integer :: nest_level = 1 ! TODO allow to vary + + call mpp_get_C2F_index(nest_domain, bbox_fine%is, bbox_fine%ie, bbox_fine%js, bbox_fine%je, & + bbox_coarse%is, bbox_coarse%ie, bbox_coarse%js, bbox_coarse%je, direction, nest_level, position=position) + + end subroutine bbox_get_C2F_index + +end module bounding_box_mod diff --git a/moving_nest/fv_moving_nest.F90 b/moving_nest/fv_moving_nest.F90 new file mode 100644 index 000000000..4e2ab8b59 --- /dev/null +++ b/moving_nest/fv_moving_nest.F90 @@ -0,0 +1,2515 @@ +!*********************************************************************** +!* GNU General Public License * +!* This file is a part of fvGFS. 
* +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides Moving Nest functionality in FV3 dynamic core +!! @author W. Ramstrom, AOML/HRD 01/15/2021 +!! @email William.Ramstrom@noaa.gov +!=======================================================================! + + +!=======================================================================! +! +! Notes +! +!------------------------------------------------------------------------ +! Moving Nest Subroutine Naming Convention +!----------------------------------------------------------------------- +! +! mn_meta_* subroutines perform moving nest operations for FV3 metadata. +! These routines will run only once per nest move. +! +! mn_var_* subroutines perform moving nest operations for an individual FV3 variable. +! These routines will run many times per nest move. +! +! mn_prog_* subroutines perform moving nest operations for the list of prognostic fields. +! These routines will run only once per nest move. +! +! mn_phys_* subroutines perform moving nest operations for the list of physics fields. +! These routines will run only once per nest move. +! +! =======================================================================! 
+ +#define REMAP 1 + +module fv_moving_nest_mod + + use block_control_mod, only : block_control_type + use fms_mod, only : mpp_clock_id, mpp_clock_begin, mpp_clock_end, CLOCK_ROUTINE, clock_flag_default, CLOCK_SUBCOMPONENT + use mpp_mod, only : mpp_pe, mpp_sync, mpp_sync_self, mpp_send, mpp_error, NOTE, FATAL + use mpp_domains_mod, only : mpp_update_domains, mpp_get_data_domain, mpp_get_global_domain + use mpp_domains_mod, only : mpp_define_nest_domains, mpp_shift_nest_domains, nest_domain_type, domain2d + use mpp_domains_mod, only : mpp_get_C2F_index, mpp_update_nest_fine + use mpp_domains_mod, only : mpp_get_F2C_index, mpp_update_nest_coarse + use mpp_domains_mod, only : NORTH, SOUTH, EAST, WEST, CORNER, CENTER + use mpp_domains_mod, only : NUPDATE, SUPDATE, EUPDATE, WUPDATE, DGRID_NE + +#ifdef GFS_TYPES + use GFS_typedefs, only: IPD_data_type => GFS_data_type, & + IPD_control_type => GFS_control_type, kind_phys +#else + use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys +#endif + use GFS_init, only: GFS_grid_populate + + use boundary_mod, only: update_coarse_grid, update_coarse_grid_mpp + use bounding_box_mod, only: bbox, bbox_get_C2F_index, fill_bbox + use constants_mod, only: cp_air, omega, rdgas, grav, rvgas, kappa, pstd_mks, hlv + use field_manager_mod, only: MODEL_ATMOS + use fv_arrays_mod, only: fv_atmos_type, fv_nest_type, fv_grid_type, R_GRID + use fv_arrays_mod, only: allocate_fv_nest_bc_type, deallocate_fv_nest_bc_type + use fv_grid_tools_mod, only: init_grid + use fv_grid_utils_mod, only: grid_utils_init, ptop_min, dist2side_latlon + use fv_mapz_mod, only: Lagrangian_to_Eulerian, moist_cv, compute_total_energy + use fv_nesting_mod, only: dealloc_nested_buffers + use fv_nwp_nudge_mod, only: do_adiabatic_init + use init_hydro_mod, only: p_var + use tracer_manager_mod, only: get_tracer_index, get_tracer_names + use fv_moving_nest_types_mod, only: fv_moving_nest_prog_type, fv_moving_nest_physics_type, Moving_nest + use fv_moving_nest_utils_mod, only: alloc_halo_buffer, load_nest_latlons_from_nc, grid_geometry, output_grid_to_nc + use fv_moving_nest_utils_mod, only: fill_nest_from_buffer, fill_nest_from_buffer_cell_center, fill_nest_from_buffer_nearest_neighbor + use fv_moving_nest_utils_mod, only: fill_nest_halos_from_parent, fill_grid_from_supergrid, fill_weight_grid + use fv_moving_nest_utils_mod, only: alloc_read_data + + implicit none + +#ifdef NO_QUAD_PRECISION + ! 64-bit precision (kind=8) + integer, parameter:: f_p = selected_real_kind(15) +#else + ! Higher precision (kind=16) for grid geometrical factors: + integer, parameter:: f_p = selected_real_kind(20) +#endif + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + + logical :: debug_log = .false. + +#include + + !! Step 2 + interface mn_var_fill_intern_nest_halos + module procedure mn_var_fill_intern_nest_halos_r4_2d + module procedure mn_var_fill_intern_nest_halos_r4_3d + module procedure mn_var_fill_intern_nest_halos_r4_4d + + module procedure mn_var_fill_intern_nest_halos_r8_2d + module procedure mn_var_fill_intern_nest_halos_r8_3d + module procedure mn_var_fill_intern_nest_halos_r8_4d + + module procedure mn_var_fill_intern_nest_halos_wind + end interface mn_var_fill_intern_nest_halos + + + !! 
Step 6 + interface mn_var_shift_data + module procedure mn_var_shift_data_r4_2d + module procedure mn_var_shift_data_r4_3d + module procedure mn_var_shift_data_r4_4d + + module procedure mn_var_shift_data_r8_2d + module procedure mn_var_shift_data_r8_3d + module procedure mn_var_shift_data_r8_4d + end interface mn_var_shift_data + + !! Step 8 + interface mn_var_dump_to_netcdf + module procedure mn_var_dump_2d_to_netcdf + module procedure mn_var_dump_3d_to_netcdf + end interface mn_var_dump_to_netcdf + + interface mn_static_read_hires + module procedure mn_static_read_hires_r4 + module procedure mn_static_read_hires_r8 + end interface mn_static_read_hires + +contains + + !!===================================================================================== + !! Step 1.9 -- Allocate and fill the temporary variable(s) + !! This is to manage variables that are not allocated with a halo + !! on the Atm structure + !!===================================================================================== + + !>@brief The subroutine 'mn_prog_fill_temp_variables' fills the temporary variable for delz + !>@details The delz variable does not have haloes so we need a temporary variable to move it. + subroutine mn_prog_fill_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(in) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n, child_grid_num !< This level and nest level + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: isd, ied, jsd, jed + integer :: is, ie, js, je + integer :: this_pe + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + this_pe = mpp_pe() + + isd = Atm(n)%bd%isd + ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd + jed = Atm(n)%bd%jed + + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + ! Reset this to a dummy value, to help flag if the halos don't get updated later. + mn_prog%delz = +99999.9 + mn_prog%delz(is:ie, js:je, 1:npz) = Atm(n)%delz(is:ie, js:je, 1:npz) + + end subroutine mn_prog_fill_temp_variables + + !>@brief The subroutine 'mn_prog_apply_temp_variables' fills the Atm%delz value from the temporary variable after nest move + !>@details The delz variable does not have haloes so we need a temporary variable to move it. + subroutine mn_prog_apply_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n, child_grid_num !< This level and nest level + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: is, ie, js, je + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + if (is_fine_pe) then + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + Atm(n)%delz(is:ie, js:je, 1:npz) = mn_prog%delz(is:ie, js:je, 1:npz) + endif + + end subroutine mn_prog_apply_temp_variables + + + !!===================================================================================== + !! Step 2 -- Fill the nest edge halos from parent grid before nest motion + !! OR Refill the nest edge halos from parent grid after nest motion + !! 
Parent and nest PEs need to execute these subroutines + !!===================================================================================== + + !>@brief The subroutine 'mn_prog_fill_nest_halos_from_parent' fills the nest edge halos from the parent + !>@details Parent and nest PEs must run this subroutine. It transfers data and interpolates onto fine nest. + subroutine mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n, child_grid_num !< This level and nest level + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Domain structure for nest + integer, intent(in) :: nz !< Number of vertical levels + + integer :: position, position_u, position_v + integer :: interp_type, interp_type_u, interp_type_v + integer :: x_refine, y_refine + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(n)%mn_prog + + ! TODO Rename this from interp_type to stagger_type + interp_type = 1 ! cell-centered A-grid + interp_type_u = 4 ! D-grid + interp_type_v = 4 ! D-grid + + position = CENTER + position_u = NORTH + position_v = EAST + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + + ! Fill centered-grid variables + call fill_nest_halos_from_parent("q_con", Atm(n)%q_con, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("pt", Atm(n)%pt, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("w", Atm(n)%w, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + !call fill_nest_halos_from_parent("omga", Atm(n)%omga, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("delp", Atm(n)%delp, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("delz", mn_prog%delz, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("q", Atm(n)%q, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + ! Move the A-grid winds. TODO consider recomputing them from D grid instead + call fill_nest_halos_from_parent("ua", Atm(n)%ua, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + call fill_nest_halos_from_parent("va", Atm(n)%va, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + ! 
Fill staggered D-grid variables + call fill_nest_halos_from_parent("u", Atm(n)%u, interp_type_u, Atm(child_grid_num)%neststruct%wt_u, & + Atm(child_grid_num)%neststruct%ind_u, x_refine, y_refine, is_fine_pe, nest_domain, position_u, nz) + call fill_nest_halos_from_parent("v", Atm(n)%v, interp_type_v, Atm(child_grid_num)%neststruct%wt_v, & + Atm(child_grid_num)%neststruct%ind_v, x_refine, y_refine, is_fine_pe, nest_domain, position_v, nz) + + end subroutine mn_prog_fill_nest_halos_from_parent + + !!============================================================================ + !! Step 3 -- Redefine the nest domain to new location + !! This calls mpp_shift_nest_domains. + !! -- Similar to med_nest_configure() from HWRF + !!============================================================================ + + !>@brief The subroutine 'mn_meta_move_nest' resets the metadata for the nest + !>@details Parent and nest PEs run this subroutine. + subroutine mn_meta_move_nest(delta_i_c, delta_j_c, pelist, is_fine_pe, extra_halo, nest_domain, domain_fine, domain_coarse, & + istart_coarse, iend_coarse, jstart_coarse, jend_coarse, istart_fine, iend_fine, jstart_fine, jend_fine) + + implicit none + + integer, intent(in) :: delta_i_c, delta_j_c !< Coarse grid delta i,j for nest move + integer, allocatable, intent(in) :: pelist(:) !< List of involved PEs + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + integer, intent(in) :: extra_halo !< Extra halo points (not fully implemented) + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + type(domain2d), intent(inout) :: domain_coarse, domain_fine !< Coarse and fine domain structures + integer, intent(inout) :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse !< Bounds of coarse grid + integer, intent(in) :: istart_fine, iend_fine, jstart_fine, jend_fine !< Bounds of fine grid + + ! Local variables + integer :: num_nest + integer :: this_pe + + integer :: delta_i_coarse(1), delta_j_coarse(1) + + this_pe = mpp_pe() + + ! Initial implementation only supports single moving nest. Update this later. + ! mpp_shift_nest_domains has a call signature to support multiple moving nests, though has not been tested for correctness. + delta_i_coarse(1) = delta_i_c + delta_j_coarse(1) = delta_j_c + + !!=========================================================== + !! + !! Relocate where the nest is aligned on the parent + !! + !!=========================================================== + + istart_coarse = istart_coarse + delta_i_c + iend_coarse = iend_coarse + delta_i_c + + jstart_coarse = jstart_coarse + delta_j_c + jend_coarse = jend_coarse + delta_j_c + + ! The fine nest will maintain the same indices + + num_nest = nest_domain%num_nest + + ! TODO Verify whether rerunning this will cause (small) memory leaks. + if (is_fine_pe) then + call mpp_shift_nest_domains(nest_domain, domain_fine, delta_i_coarse, delta_j_coarse, extra_halo) + else + call mpp_shift_nest_domains(nest_domain, domain_coarse, delta_i_coarse, delta_j_coarse, extra_halo) + endif + + end subroutine mn_meta_move_nest + + + !================================================================================ + !! Step 4 -- Updates the internal nest tile halos + !================================================================================ + + !>@brief The subroutine 'mn_prog_fill_intern_nest_halos' fill internal nest halos for prognostic variables + !>@details Only nest PEs call this subroutine. 
+ subroutine mn_prog_fill_intern_nest_halos(Atm, domain_fine, is_fine_pe) + type(fv_atmos_type), target, intent(inout) :: Atm !< Single instance of atmospheric data + type(domain2d), intent(inout) :: domain_fine !< Domain structure for nest + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + integer :: this_pe + type(fv_moving_nest_prog_type), pointer :: mn_prog + + mn_prog => Moving_nest(2)%mn_prog ! TODO allow nest number to vary + this_pe = mpp_pe() + + call mn_var_fill_intern_nest_halos(Atm%q_con, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%pt, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%w, domain_fine, is_fine_pe) + !call mn_var_fill_intern_nest_halos(Atm%omga, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%delp, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_prog%delz, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(Atm%ua, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(Atm%va, domain_fine, is_fine_pe) + + ! The vector form of the subroutine takes care of the staggering of the wind variables internally. + call mn_var_fill_intern_nest_halos(Atm%u, Atm%v, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(Atm%q, domain_fine, is_fine_pe) + + end subroutine mn_prog_fill_intern_nest_halos + + + !================================================================================ + ! + ! Step 4 -- Per variable fill internal nest halos + ! + !================================================================================ + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r4_2d' fills internal nest halos + !>@details This version of the subroutine is for 2D arrays of single precision reals. + subroutine mn_var_fill_intern_nest_halos_r4_2d(data_var, domain_fine, is_fine_pe) + real*4, allocatable, intent(inout) :: data_var(:,:) !< Model variable data + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + + integer :: this_pe + this_pe = mpp_pe() + + if (is_fine_pe) then + ! mpp_update_domains fills the halo region of the fine grids for the interior of the nest. + ! The fine nest boundary with the coarse grid remains unchanged. + ! seems that this only performs communication between fine nest PEs + ! Just transfers halo data between tiles of same resolution -- doesn't perform any interpolation! + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + endif + + end subroutine mn_var_fill_intern_nest_halos_r4_2d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r8_2d' fills internal nest halos + !>@details This version of the subroutine is for 2D arrays of double precision reals. + subroutine mn_var_fill_intern_nest_halos_r8_2d(data_var, domain_fine, is_fine_pe) + real*8, allocatable, intent(inout) :: data_var(:,:) !< Double precision model variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + if (is_fine_pe) then + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + endif + + end subroutine mn_var_fill_intern_nest_halos_r8_2d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r4_3d' fills internal nest halos + !>@details This version of the subroutine is for 3D arrays of single precision reals. 
+ subroutine mn_var_fill_intern_nest_halos_r4_3d(data_var, domain_fine, is_fine_pe) + real*4, allocatable, intent(inout) :: data_var(:,:,:) !< Single precision model variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + if (is_fine_pe) then + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + endif + + end subroutine mn_var_fill_intern_nest_halos_r4_3d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r8_3d' fills internal nest halos + !>@details This version of the subroutine is for 3D arrays of double precision reals. + subroutine mn_var_fill_intern_nest_halos_r8_3d(data_var, domain_fine, is_fine_pe) + real*8, allocatable, intent(inout) :: data_var(:,:,:) !< Double precision model variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + if (is_fine_pe) then + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + endif + + end subroutine mn_var_fill_intern_nest_halos_r8_3d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_wind' fills internal nest halos for u and v wind + !>@details This version of the subroutine is for 3D arrays of single precision reals for each wind component + subroutine mn_var_fill_intern_nest_halos_wind(u_var, v_var, domain_fine, is_fine_pe) + real, allocatable, intent(inout) :: u_var(:,:,:) !< Staggered u wind + real, allocatable, intent(inout) :: v_var(:,:,:) !< Staggered v wind + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + if (is_fine_pe) then + call mpp_update_domains(u_var, v_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE, gridtype=DGRID_NE) + endif + + end subroutine mn_var_fill_intern_nest_halos_wind + + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r4_4d' fills internal nest halos + !>@details This version of the subroutine is for 4D arrays of single precision reals. + subroutine mn_var_fill_intern_nest_halos_r4_4d(data_var, domain_fine, is_fine_pe) + real*4, allocatable, intent(inout) :: data_var(:,:,:,:) !< Single precision variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + + if (is_fine_pe) then + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + endif + + end subroutine mn_var_fill_intern_nest_halos_r4_4d + + !>@brief The subroutine 'mn_var_fill_intern_nest_halos_r8_4d' fills internal nest halos + !>@details This version of the subroutine is for 4D arrays of double precision reals. + subroutine mn_var_fill_intern_nest_halos_r8_4d(data_var, domain_fine, is_fine_pe) + real*8, allocatable, intent(inout) :: data_var(:,:,:,:) !< Double precision variable + type(domain2d), intent(inout) :: domain_fine !< Nest domain structure + logical, intent(in) :: is_fine_pe !< Is this a nest PE?
+ + if (is_fine_pe) then + call mpp_update_domains(data_var, domain_fine, flags=NUPDATE + EUPDATE + SUPDATE + WUPDATE) + endif + + end subroutine mn_var_fill_intern_nest_halos_r8_4d + + !>@brief Find the parent point that corresponds to the is,js point of the nest, and returns that nest point also + subroutine calc_nest_alignment(Atm, n, nest_x, nest_y, parent_x, parent_y) + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) !< Atm data array + integer, intent(in) :: n !< Grid numbers + integer, intent(out) :: nest_x, nest_y, parent_x, parent_y + + integer :: refine + integer :: child_grid_num + integer :: ioffset, joffset + + child_grid_num = n + + refine = Atm(child_grid_num)%neststruct%refinement + + ! parent_x and parent_y are on the supergrid, so an increment of ioffset is an increment of 2*refine + + nest_x = Atm(child_grid_num)%bd%isd + nest_y = Atm(child_grid_num)%bd%jsd + + ioffset = Atm(n)%neststruct%ioffset + joffset = Atm(n)%neststruct%joffset + + ! Increment of 3 is for halo. Factor of 2 is for supergrid. + parent_x = (nest_x - 3)*2 + ioffset*refine*2 + parent_y = (nest_y - 3)*2 + joffset*refine*2 + + end subroutine calc_nest_alignment + + + + subroutine check_nest_alignment(nest_geo, parent_geo, nest_x, nest_y, parent_x, parent_y, found) + type(grid_geometry), intent(in) :: nest_geo !< Tile geometry + type(grid_geometry), intent(in) :: parent_geo !< Parent grid at high-resolution geometry + integer, intent(in) :: nest_x, nest_y, parent_x, parent_y + logical, intent(out) :: found + + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg + integer :: this_pe + + this_pe = mpp_pe() + rad2deg = 180.0 / pi + + found = .False. + + if (abs(parent_geo%lats(parent_x, parent_y) - nest_geo%lats(nest_x, nest_y)) .lt. 0.0001) then + if (abs(parent_geo%lons(parent_x, parent_y) - nest_geo%lons(nest_x, nest_y)) .lt. 0.0001) then + found = .True. + endif + if (abs(abs(parent_geo%lons(parent_x, parent_y) - nest_geo%lons(nest_x, nest_y)) - 2*pi) .lt. 0.0001) then + found = .True. + endif + endif + + end subroutine check_nest_alignment + + !!============================================================================ + !! Step 5.1 -- Load the latlon data from NetCDF + !! 
update parent_geo, tile_geo*, p_grid*, n_grid* + !!============================================================================ + + !>@brief The subroutine 'mn_latlon_load_parent' loads parent latlon data from netCDF + !>@details Updates parent_geo, tile_geo*, p_grid*, n_grid* + subroutine mn_latlon_load_parent(surface_dir, Atm, n, parent_tile, delta_i_c, delta_j_c, pelist, child_grid_num, parent_geo, tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, p_grid, n_grid, p_grid_u, n_grid_u, p_grid_v, n_grid_v) + character(len=*), intent(in) :: surface_dir !< Directory for static files + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) !< Atm data array + integer, intent(in) :: n, parent_tile, child_grid_num !< Grid numbers + integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion in delta i,j + integer, allocatable, intent(in) :: pelist(:) !< PE list for fms2_io + type(grid_geometry), intent(inout) :: parent_geo, tile_geo, tile_geo_u, tile_geo_v !< Tile geometries + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Parent grid at high-resolution geometry + real(kind=R_GRID), allocatable, intent(inout):: p_grid(:,:,:) !< A-stagger lat/lon grids + real(kind=R_GRID), allocatable, intent(inout):: p_grid_u(:,:,:) !< u-wind staggered lat/lon grids + real(kind=R_GRID), allocatable, intent(inout):: p_grid_v(:,:,:) !< v-wind staggered lat/lon grids + real(kind=R_GRID), allocatable, intent(out) :: n_grid(:,:,:) !< A-stagger lat/lon grids + real(kind=R_GRID), allocatable, intent(out) :: n_grid_u(:,:,:) !< u-wind staggered lat/lon grids + real(kind=R_GRID), allocatable, intent(out) :: n_grid_v(:,:,:) !< v-wind staggered lat/lon grids + + character(len=256) :: grid_filename + logical, save :: first_nest_move = .true. + integer, save :: p_istart_fine, p_iend_fine, p_jstart_fine, p_jend_fine + integer :: x, y, fp_i, fp_j + integer :: position, position_u, position_v + integer :: x_refine, y_refine + integer :: this_pe + + logical, save :: first_time = .True. + integer, save :: id_load1, id_load2, id_load3, id_load4, id_load5 + logical :: use_timers + + use_timers = Atm(n)%flagstruct%fv_timers + + this_pe = mpp_pe() + + if (first_time) then + if (use_timers) then + id_load1 = mpp_clock_id ('MN LatLon Part 1 File', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_load2 = mpp_clock_id ('MN LatLon Part 2 File', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_load3 = mpp_clock_id ('MN LatLon Part 3 File', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_load4 = mpp_clock_id ('MN LatLon Part 4 File', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_load5 = mpp_clock_id ('MN LatLon Part 5 File', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + endif + first_time = .False. + endif + + position = CENTER + position_u = NORTH + position_v = EAST + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + + ! Setup parent_geo with the values for the parent tile + ! Note that lat/lon are stored in the model in RADIANS + ! Only the netCDF files use degrees + + if (first_nest_move) then + if (use_timers) call mpp_clock_begin (id_load1) + + call mn_static_filename(surface_dir, parent_tile, 'grid', 1, grid_filename) + call load_nest_latlons_from_nc(grid_filename, Atm(1)%npx, Atm(1)%npy, 1, pelist, & + parent_geo, p_istart_fine, p_iend_fine, p_jstart_fine, p_jend_fine) + + ! 
These are saved between timesteps in fv_moving_nest_main.F90 + allocate(p_grid(1:parent_geo%nxp, 1:parent_geo%nyp,2)) + allocate(p_grid_u(1:parent_geo%nxp, 1:parent_geo%nyp+1,2)) + allocate(p_grid_v(1:parent_geo%nxp+1, 1:parent_geo%nyp,2)) + + ! These are big (parent grid size), and do not change during the model integration. + call assign_p_grids(parent_geo, p_grid, position) + call assign_p_grids(parent_geo, p_grid_u, position_u) + call assign_p_grids(parent_geo, p_grid_v, position_v) + + first_nest_move = .false. + if (use_timers) call mpp_clock_end (id_load1) + endif + + if (use_timers) call mpp_clock_begin (id_load2) + + parent_geo%nxp = Atm(1)%npx + parent_geo%nyp = Atm(1)%npy + + parent_geo%nx = Atm(1)%npx - 1 + parent_geo%ny = Atm(1)%npy - 1 + + !=========================================================== + ! Begin tile_geo per PE. + !=========================================================== + + !------------------------ + ! Grid Definitions + !------------------------ + ! + ! tile_geo - lat/lons on A-grid (cell centers) for nest, on data domain (includes halo) for each PE + ! parent_geo - lat/lons of supergrid for parent + ! n_grid - lat/lons of cell centers for nest + ! p_grid - lat/lons of cell centers for parent + ! + ! gridstruct%agrid - cell centers for each PE + ! gridstruct%grid - cell corners for each PE + + ! Allocate tile_geo just for this PE, copied from Atm(n)%gridstruct%agrid + tile_geo%nx = ubound(Atm(n)%gridstruct%agrid, 1) - lbound(Atm(n)%gridstruct%agrid, 1) + tile_geo%ny = ubound(Atm(n)%gridstruct%agrid, 2) - lbound(Atm(n)%gridstruct%agrid, 2) + tile_geo%nxp = tile_geo%nx + 1 + tile_geo%nyp = tile_geo%ny + 1 + + allocate(tile_geo%lons(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + allocate(tile_geo%lats(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + + tile_geo%lats = -999.9 + tile_geo%lons = -999.9 + + do x = lbound(Atm(n)%gridstruct%agrid, 1), ubound(Atm(n)%gridstruct%agrid, 1) + do y = lbound(Atm(n)%gridstruct%agrid, 2), ubound(Atm(n)%gridstruct%agrid, 2) + tile_geo%lons(x,y) = Atm(n)%gridstruct%agrid(x,y,1) + tile_geo%lats(x,y) = Atm(n)%gridstruct%agrid(x,y,2) + enddo + enddo + + if (use_timers) call mpp_clock_end (id_load2) + if (use_timers) call mpp_clock_begin (id_load3) + + ! Allocate tile_geo_u just for this PE, copied from Atm(n)%gridstruct%grid + ! grid is 1 larger than agrid + ! u(npx, npy+1) + tile_geo_u%nx = ubound(Atm(n)%gridstruct%agrid, 1) - lbound(Atm(n)%gridstruct%agrid, 1) + tile_geo_u%ny = ubound(Atm(n)%gridstruct%grid, 2) - lbound(Atm(n)%gridstruct%grid, 2) + tile_geo_u%nxp = tile_geo_u%nx + 1 + tile_geo_u%nyp = tile_geo_u%ny + 1 + + + if (.not. allocated(tile_geo_u%lons)) then + allocate(tile_geo_u%lons(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%grid, 2):ubound(Atm(n)%gridstruct%grid, 2))) + allocate(tile_geo_u%lats(lbound(Atm(n)%gridstruct%agrid, 1):ubound(Atm(n)%gridstruct%agrid, 1), lbound(Atm(n)%gridstruct%grid, 2):ubound(Atm(n)%gridstruct%grid, 2))) + endif + + tile_geo_u%lons = -999.9 + tile_geo_u%lats = -999.9 + + ! Allocate tile_geo_v just for this PE, copied from Atm(n)%gridstruct%grid + ! grid is 1 larger than agrid + ! 
u(npx, npy+1) + tile_geo_v%nx = ubound(Atm(n)%gridstruct%grid, 1) - lbound(Atm(n)%gridstruct%grid, 1) + tile_geo_v%ny = ubound(Atm(n)%gridstruct%agrid, 2) - lbound(Atm(n)%gridstruct%agrid, 2) + tile_geo_v%nxp = tile_geo_v%nx + 1 + tile_geo_v%nyp = tile_geo_v%ny + 1 + + allocate(tile_geo_v%lons(lbound(Atm(n)%gridstruct%grid, 1):ubound(Atm(n)%gridstruct%grid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + allocate(tile_geo_v%lats(lbound(Atm(n)%gridstruct%grid, 1):ubound(Atm(n)%gridstruct%grid, 1), lbound(Atm(n)%gridstruct%agrid, 2):ubound(Atm(n)%gridstruct%agrid, 2))) + + tile_geo_v%lons = -999.9 + tile_geo_v%lats = -999.9 + + !=========================================================== + ! End tile_geo per PE. + !=========================================================== + + allocate(n_grid(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 2)) + n_grid = real_snan + + allocate(n_grid_u(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed+1, 2)) + n_grid_u = real_snan + + allocate(n_grid_v(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied+1, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 2)) + n_grid_v = real_snan + + ! TODO - propagate tile_geo information back to Atm structure + ! TODO - deallocate tile_geo lat/lons + ! TODO - ensure the allocation of tile_geo lat/lons is only performed once - outside the loop + + if (use_timers) call mpp_clock_end (id_load3) + if (use_timers) call mpp_clock_begin (id_load4) + + call move_nest_geo(Atm, n, tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, delta_i_c, delta_j_c, x_refine, y_refine) + + if (use_timers) call mpp_clock_end (id_load4) + if (use_timers) call mpp_clock_begin (id_load5) + + ! These grids are small (nest size), and change each time nest moves. + call assign_n_grids(tile_geo, n_grid, position) + call assign_n_grids(tile_geo_u, n_grid_u, position_u) + call assign_n_grids(tile_geo_v, n_grid_v, position_v) + + if (use_timers) call mpp_clock_end (id_load5) + + end subroutine mn_latlon_load_parent + + !>@brief The subroutine 'mn_static_filename' generates the full pathname for a static file for each run + !>@details Constructs the full pathname for a variable and refinement level and tests whether it exists + subroutine mn_static_filename(surface_dir, tile_num, tag, refine, grid_filename) + character(len=*), intent(in) :: surface_dir !< Directory + character(len=*), intent(in) :: tag !< Variable name + integer, intent(in) :: tile_num !< Tile number + integer, intent(in) :: refine !< Nest refinement + character(len=*), intent(out) :: grid_filename !< Output pathname to netCDF file + + character(len=256) :: refine_str, parent_str + character(len=1) :: divider + logical :: file_exists + + write(parent_str, '(I0)'), tile_num + + if (refine .eq. 1 .and. (tag .eq. 'grid' .or. tag .eq. 'oro_data')) then + ! For 1x files in INPUT directory; go at the symbolic link + grid_filename = trim(trim(surface_dir) // '/' // trim(tag) // '.tile' // trim(parent_str) // '.nc') + else + if (refine .eq. 1) then + grid_filename = trim(trim(surface_dir) // '/' // trim(tag) // '.tile' // trim(parent_str) // '.nc') + else + write(refine_str, '(I0,A1)'), refine, 'x' + grid_filename = trim(trim(surface_dir) // '/' // trim(tag) // '.tile' // trim(parent_str) // '.' 
// trim(refine_str) // '.nc') + endif + endif + + grid_filename = trim(grid_filename) + + inquire(FILE=grid_filename, EXIST=file_exists) + if (.not. file_exists) then + call mpp_error(FATAL, 'mn_static_filename DOES NOT EXIST '//trim(grid_filename)) + endif + + end subroutine mn_static_filename + + !>@brief The subroutine 'mn_latlon_read_hires_parent' reads in static data from a netCDF file + subroutine mn_latlon_read_hires_parent(npx, npy, refine, pelist, fp_super_tile_geo, surface_dir, parent_tile) + integer, intent(in) :: npx, npy, refine !< Number of points in x,y, and refinement + integer, allocatable, intent(in) :: pelist(:) !< PE list for fms2_io + type(grid_geometry), intent(inout) :: fp_super_tile_geo !< Geometry of supergrid for parent tile at high resolution + character(len=*), intent(in) :: surface_dir !< Surface directory to read netCDF file from + integer, intent(in) :: parent_tile !< Parent tile number + + integer :: fp_super_istart_fine, fp_super_jstart_fine,fp_super_iend_fine, fp_super_jend_fine + character(len=256) :: grid_filename + + call mn_static_filename(surface_dir, parent_tile, 'grid', refine, grid_filename) + + call load_nest_latlons_from_nc(trim(grid_filename), npx, npy, refine, pelist, & + fp_super_tile_geo, fp_super_istart_fine, fp_super_iend_fine, fp_super_jstart_fine, fp_super_jend_fine) + + end subroutine mn_latlon_read_hires_parent + + !>@brief The subroutine 'mn_orog_read_hires_parent' loads parent orography data from netCDF + !>@details Gathers a number of terrain-related variables from the netCDF file + subroutine mn_orog_read_hires_parent(npx, npy, refine, pelist, surface_dir, filtered_terrain, orog_grid, orog_std_grid, ls_mask_grid, land_frac_grid, parent_tile) + integer, intent(in) :: npx, npy, refine !< Number of points in x,y, and refinement + integer, allocatable, intent(in) :: pelist(:) !< PE list for fms2_io + character(len=*), intent(in) :: surface_dir !< Surface directory to read netCDF file from + logical, intent(in) :: filtered_terrain !< Whether to use filtered terrain + real, allocatable, intent(out) :: orog_grid(:,:) !< Output orography grid + real, allocatable, intent(out) :: orog_std_grid(:,:) !< Output orography standard deviation grid + real, allocatable, intent(out) :: ls_mask_grid(:,:) !< Output land sea mask grid + real, allocatable, intent(out) :: land_frac_grid(:,:)!< Output land fraction grid + integer, intent(in) :: parent_tile !< Parent tile number + + integer :: nx_cubic, nx, ny, fp_nx, fp_ny, mid_nx, mid_ny + integer :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + character(len=512) :: nc_filename + character(len=16) :: orog_var_name + integer :: this_pe + + this_pe = mpp_pe() + + nx_cubic = npx - 1 + nx = npx - 1 + ny = npy - 1 + + fp_istart_fine = 0 + fp_iend_fine = nx * refine + fp_jstart_fine = 0 + fp_jend_fine = ny * refine + + fp_nx = fp_iend_fine - fp_istart_fine + fp_ny = fp_jend_fine - fp_jstart_fine + + mid_nx = (fp_iend_fine - fp_istart_fine) / 2 + mid_ny = (fp_jend_fine - fp_jstart_fine) / 2 + + call mn_static_filename(surface_dir, parent_tile, 'oro_data', refine, nc_filename) + + if (filtered_terrain) then + orog_var_name = 'orog_filt' + else + orog_var_name = 'orog_raw' + endif + + call alloc_read_data(nc_filename, orog_var_name, fp_nx, fp_ny, orog_grid, pelist) + call alloc_read_data(nc_filename, 'slmsk', fp_nx, fp_ny, ls_mask_grid, pelist) + + call alloc_read_data(nc_filename, 'stddev', fp_nx, fp_ny, orog_std_grid, pelist) ! 
TODO validate if this is needed + call alloc_read_data(nc_filename, 'land_frac', fp_nx, fp_ny, land_frac_grid, pelist) ! TODO validate if this is needed + + end subroutine mn_orog_read_hires_parent + + !>@brief The subroutine 'mn_static_read_hires_r4' loads high resolution data from netCDF + !>@details Gathers a single variable from the netCDF file + subroutine mn_static_read_hires_r4(npx, npy, refine, pelist, surface_dir, file_prefix, var_name, data_grid, parent_tile, time) + integer, intent(in) :: npx, npy, refine !< Number of x,y points and nest refinement + integer, allocatable, intent(in) :: pelist(:) !< PE list for fms2_io + character(len=*), intent(in) :: surface_dir, file_prefix !< Surface directory and file tag + character(len=*), intent(in) :: var_name !< Variable name in netCDF file + real*4, allocatable, intent(out) :: data_grid(:,:) !< Output data grid + integer, intent(in) :: parent_tile !< Parent tile number + integer, intent(in), optional :: time !< Optional month number for time-varying parameters + + character(len=512) :: nc_filename + integer :: nx_cubic, nx, ny, fp_nx, fp_ny + integer :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + + nx_cubic = npx - 1 + nx = npx - 1 + ny = npy - 1 + + fp_istart_fine = 0 + fp_iend_fine = nx * refine + fp_jstart_fine = 0 + fp_jend_fine = ny * refine + + fp_nx = fp_iend_fine - fp_istart_fine + fp_ny = fp_jend_fine - fp_jstart_fine + + call mn_static_filename(surface_dir, parent_tile, file_prefix, refine, nc_filename) + + if (present(time)) then + call alloc_read_data(nc_filename, var_name, fp_nx, fp_ny, data_grid, pelist, time) + else + call alloc_read_data(nc_filename, var_name, fp_nx, fp_ny, data_grid, pelist) + endif + + end subroutine mn_static_read_hires_r4 + + !>@brief The subroutine 'mn_static_read_hires_r8' loads high resolution data from netCDF + !>@details Gathers a single variable from the netCDF file + subroutine mn_static_read_hires_r8(npx, npy, refine, pelist, surface_dir, file_prefix, var_name, data_grid, parent_tile) + integer, intent(in) :: npx, npy, refine !< Number of x,y points and nest refinement + integer, allocatable, intent(in) :: pelist(:) !< PE list for fms2_io + character(len=*), intent(in) :: surface_dir, file_prefix !< Surface directory and file tag + character(len=*), intent(in) :: var_name !< Variable name in netCDF file + real*8, allocatable, intent(out) :: data_grid(:,:) !< Output data grid + integer, intent(in) :: parent_tile !< Parent tile number + + character(len=512) :: nc_filename + + integer :: nx_cubic, nx, ny, fp_nx, fp_ny + integer :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + + nx_cubic = npx - 1 + nx = npx - 1 + ny = npy - 1 + + fp_istart_fine = 0 + fp_iend_fine = nx * refine + fp_jstart_fine = 0 + fp_jend_fine = ny * refine + + fp_nx = fp_iend_fine - fp_istart_fine + fp_ny = fp_jend_fine - fp_jstart_fine + + ! TODO consider adding optional time argument as in mn_static_read_hires_r4 + + call mn_static_filename(surface_dir, parent_tile, file_prefix, refine, nc_filename) + + call alloc_read_data(nc_filename, var_name, fp_nx, fp_ny, data_grid, pelist) + + end subroutine mn_static_read_hires_r8 + + + !!============================================================================ + !! 
Step 5.2 -- Recalculate nest halo weights
+  !!============================================================================
+
+  !>@brief The subroutine 'mn_meta_recalc' recalculates nest halo weights
+  subroutine mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo, parent_geo, fp_super_tile_geo, &
+      is_fine_pe, nest_domain, position, p_grid, n_grid, wt, istart_coarse, jstart_coarse)
+    integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion in delta i,j
+    integer, intent(in) :: x_refine, y_refine !< Nest refinement
+    type(grid_geometry), intent(inout) :: tile_geo, parent_geo, fp_super_tile_geo !< tile geometries
+    logical, intent(in) :: is_fine_pe !< Is this a nest PE?
+    type(nest_domain_type), intent(in) :: nest_domain !< Nest domain structure
+    real(kind=R_GRID), allocatable, intent(inout) :: p_grid(:,:,:) !< Parent lat/lon grid
+    real(kind=R_GRID), allocatable, intent(inout) :: n_grid(:,:,:) !< Nest lat/lon grid
+    real, allocatable, intent(inout) :: wt(:,:,:) !< Interpolation weights
+    integer, intent(inout) :: position !< Stagger
+    integer, intent(in) :: istart_coarse, jstart_coarse !< Initial nest offsets
+
+    type(bbox) :: wt_fine, wt_coarse
+    integer :: this_pe
+
+    this_pe = mpp_pe()
+
+    ! Update the coarse and fine indices after shifting the nest
+    if (is_fine_pe) then
+
+      !!===========================================================
+      !!
+      !!  Recalculate halo weights
+      !!
+      !!===========================================================
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, EAST, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, WEST, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, NORTH, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+      call bbox_get_C2F_index(nest_domain, wt_fine, wt_coarse, SOUTH, position)
+      call calc_nest_halo_weights(wt_fine, wt_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine)
+
+    endif
+
+  end subroutine mn_meta_recalc
+
+
+  !!============================================================================
+  !! Step 5.3 -- Adjust index by delta_i_c, delta_j_c
+  !!============================================================================
+
+  !>@brief The subroutine 'mn_shift_index' adjusts the index array for a nest move
+  !>@details Fast routine to increment indices by the delta in i,j direction
+  subroutine mn_shift_index(delta_i_c, delta_j_c, ind)
+    integer, intent(in) :: delta_i_c, delta_j_c !< Nest move deltas in i,j
+    integer, allocatable, intent(inout) :: ind(:,:,:) !< Nest to parent index
+
+    ! Shift the index by the delta of this nest move.
+    ! TODO -- validate that we are not moving off the edge of the parent grid.
+    integer :: i, j
+
+    do i = lbound(ind,1), ubound(ind,1)
+      do j = lbound(ind,2), ubound(ind,2)
+        ind(i,j,1) = ind(i,j,1) + delta_i_c
+        ind(i,j,2) = ind(i,j,2) + delta_j_c
+      enddo
+    enddo
+
+  end subroutine mn_shift_index
+
+
+  !================================================================================
+  !
+  !  Prognostic and Physics Variable Nest Motion
+  !
+  !================================================================================
+
+  !!============================================================================
+  !! Step 6 Shift the data on each nest PE
+  !!  -- similar to med_nest_move in HWRF
+  !!============================================================================
+
+  !>@brief The subroutine 'mn_prog_shift_data' shifts the data on each nest PE
+  !>@details Iterates through the prognostic variables
+  subroutine mn_prog_shift_data(Atm, n, child_grid_num, wt_h, wt_u, wt_v, &
+      delta_i_c, delta_j_c, x_refine, y_refine, &
+      is_fine_pe, nest_domain, nz)
+    type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Atm data array
+    integer, intent(in) :: n, child_grid_num !< Grid numbers
+    real, allocatable, intent(in) :: wt_h(:,:,:), wt_u(:,:,:), wt_v(:,:,:) !< Interpolation weights
+    integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< Delta i,j, nest refinement
+    logical, intent(in) :: is_fine_pe !< Is this a nest PE?
+    type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure
+    integer, intent(in) :: nz !< Number of vertical levels
+
+    ! Constants for mpp calls
+    integer :: interp_type = 1 ! cell-centered A-grid
+    integer :: interp_type_u = 4 ! D-grid
+    integer :: interp_type_v = 4 ! D-grid
+    integer :: position = CENTER ! CENTER, NORTH, EAST
+    integer :: position_u = NORTH
+    integer :: position_v = EAST
+
+    type(fv_moving_nest_prog_type), pointer :: mn_prog
+
+    mn_prog => Moving_nest(n)%mn_prog
+
+    call mn_var_shift_data(Atm(n)%q_con, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, &
+        delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz)
+
+    call mn_var_shift_data(Atm(n)%pt, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, &
+        delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz)
+
+    call mn_var_shift_data(Atm(n)%w, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, &
+        delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz)
+
+    !call mn_var_shift_data(Atm(n)%omga, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, &
+    !    delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz)
+
+    call mn_var_shift_data(Atm(n)%delp, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, &
+        delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz)
+
+    !call mn_var_shift_data(Atm(n)%delz, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, &
+    ! 
delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(mn_prog%delz, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%ua, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%va, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%q, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + call mn_var_shift_data(Atm(n)%u, interp_type_u, wt_u, Atm(child_grid_num)%neststruct%ind_u, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position_u, nz) + + call mn_var_shift_data(Atm(n)%v, interp_type_v, wt_v, Atm(child_grid_num)%neststruct%ind_v, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position_v, nz) + + end subroutine mn_prog_shift_data + + + !!============================================================================ + !! Step 6 - per variable + !!============================================================================ + + !>@brief The subroutine 'mn_prog_shift_data_r4_2d' shifts the data for a variable on each nest PE + !>@details For single variable + subroutine mn_var_shift_data_r4_2d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + real*4, allocatable, intent(inout) :: data_var(:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position !< Grid offset + + real*4, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: nest_level = 1 ! TODO allow to vary + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! 
+ !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r4_2d + + !>@brief The subroutine 'mn_prog_shift_data_r8_2d' shifts the data for a variable on each nest PE + !>@details For one double precision 2D variable + subroutine mn_var_shift_data_r8_2d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + real*8, allocatable, intent(inout) :: data_var(:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position !< Grid offset + + real*8, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: nest_level = 1 ! TODO allow to vary + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r8_2d + + !>@brief The subroutine 'mn_prog_shift_data_r4_3d' shifts the data for a variable on each nest PE + !>@details For one single precision 3D variable + subroutine mn_var_shift_data_r4_3d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + real*4, allocatable, intent(inout) :: data_var(:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number of vertical levels + + real*4, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: nest_level = 1 ! TODO allow to vary + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + + !==================================================== + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r4_3d + + + !>@brief The subroutine 'mn_prog_shift_data_r8_3d' shifts the data for a variable on each nest PE + !>@details For one double precision 3D variable + subroutine mn_var_shift_data_r8_3d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + + real*8, allocatable, intent(inout) :: data_var(:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number vertical levels + + real*8, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: nest_level = 1 ! TODO allow to vary + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo buffers; requires nest_domain to be intent(inout) + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r8_3d + + + !>@brief The subroutine 'mn_prog_shift_data_r4_4d' shifts the data for a variable on each nest PE + !>@details For one single precision 4D variable + subroutine mn_var_shift_data_r4_4d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + real*4, allocatable, intent(inout) :: data_var(:,:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number of vertical levels + + real*4, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: n4d + integer :: nest_level = 1 ! TODO allow to vary + + n4d = ubound(data_var, 4) + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + !==================================================== + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r4_4d + + + !>@brief The subroutine 'mn_prog_shift_data_r8_4d' shifts the data for a variable on each nest PE + !>@details For one double precision 4D variable + subroutine mn_var_shift_data_r8_4d(data_var, interp_type, wt, ind, delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + real*8, allocatable, intent(inout) :: data_var(:,:,:,:) !< Data variable + integer, intent(in) :: interp_type !< Interpolation stagger type + real, allocatable, intent(in) :: wt(:,:,:) !< Interpolation weight array + integer, allocatable, intent(in) :: ind(:,:,:) !< Fine to coarse index array + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + logical, intent(in) :: is_fine_pe !< Is nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: position, nz !< Grid offset, number of vertical levels + + real*8, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse ! step 4 + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: n4d + integer :: nest_level = 1 ! TODO allow to vary + + n4d = ubound(data_var, 4) + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + !!=========================================================== + !! + !! Shift grids internal to each nest PE + !! + !!=========================================================== + + if ( delta_i_c .ne. 0 ) then + data_var = eoshift(data_var, x_refine * delta_i_c, DIM=1) + endif + + if (delta_j_c .ne. 0) then + data_var = eoshift(data_var, y_refine * delta_j_c, DIM=2) + endif + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine mn_var_shift_data_r8_4d + + + !================================================================================ + ! + ! Step 7 -- Gridstruct resetting and reallocation of static buffers + ! init_grid() also updates the wt arrays + !================================================================================ + + !>@brief The subroutine 'mn_meta_reset_gridstruct' resets navigation data and reallocates needed data in the gridstruct after nest move + !>@details This routine is computationally demanding and is a target for later optimization. + subroutine mn_meta_reset_gridstruct(Atm, n, child_grid_num, nest_domain, fp_super_tile_geo, x_refine, y_refine, is_fine_pe, wt_h, wt_u, wt_v, a_step, dt_atmos) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Atm data array + integer, intent(in) :: n, child_grid_num !< This level and nest level + type(nest_domain_type), intent(in) :: nest_domain !< Nest domain structure + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Parent high-resolution geometry + integer, intent(in) :: x_refine, y_refine !< Nest refinement + logical, intent(in) :: is_fine_pe !< Is nest PE? + real, allocatable, intent(in) :: wt_h(:,:,:), wt_u(:,:,:), wt_v(:,:,:) !< Interpolation weights + integer, intent(in) :: a_step !< Which timestep + real, intent(in) :: dt_atmos !< Timestep duration in seconds + + integer :: isg, ieg, jsg, jeg + integer :: ng, pp, nn, parent_tile, refinement, ioffset, joffset + integer :: this_pe, gid + integer :: tile_coarse(2) + + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg + + ! Coriolis parameter variables + real :: alpha = 0. + real, pointer, dimension(:,:,:) :: grid, agrid + real, pointer, dimension(:,:) :: fC, f0 + integer :: isd, ied, jsd, jed + integer :: i, j + + logical, save :: first_time = .true. + integer, save :: id_reset1, id_reset2, id_reset3, id_reset4, id_reset5, id_reset6, id_reset7 + + logical :: use_timers ! Set this to true to generate performance profiling information in out.* file + + use_timers = Atm(n)%flagstruct%fv_timers + + if (first_time .and. 
use_timers) then
+      id_reset1 = mpp_clock_id ('MN 7 Reset 1', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset2 = mpp_clock_id ('MN 7 Reset 2', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset3 = mpp_clock_id ('MN 7 Reset 3', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset4 = mpp_clock_id ('MN 7 Reset 4', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset5 = mpp_clock_id ('MN 7 Reset 5', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset6 = mpp_clock_id ('MN 7 Reset 6', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+      id_reset7 = mpp_clock_id ('MN 7 Reset 7', flags = clock_flag_default, grain=CLOCK_ROUTINE )
+    endif
+
+    rad2deg = 180.0 / pi
+
+    this_pe = mpp_pe()
+    gid = this_pe
+
+    parent_tile = Atm(child_grid_num)%neststruct%parent_tile
+    ioffset = Atm(child_grid_num)%neststruct%ioffset
+    joffset = Atm(child_grid_num)%neststruct%joffset
+
+    ! Reset the gridstruct values for the nest
+    if (is_fine_pe) then
+      ! Fill in values from high resolution, full panel, supergrid
+      if (use_timers) call mpp_clock_begin (id_reset1)
+
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%grid, CORNER, fp_super_tile_geo, ioffset, joffset, &
+          x_refine, y_refine)
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%agrid, CENTER, fp_super_tile_geo, ioffset, joffset, &
+          x_refine, y_refine)
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%grid_64, CORNER, fp_super_tile_geo, &
+          ioffset, joffset, x_refine, y_refine)
+      call fill_grid_from_supergrid(Atm(n)%gridstruct%agrid_64, CENTER, fp_super_tile_geo, &
+          ioffset, joffset, x_refine, y_refine)
+
+      ! Reset the coriolis parameters, using code from external_ic.F90::get_external_ic()
+
+      isd = Atm(n)%bd%isd
+      ied = Atm(n)%bd%ied
+      jsd = Atm(n)%bd%jsd
+      jed = Atm(n)%bd%jed
+
+      grid => Atm(n)%gridstruct%grid
+      agrid => Atm(n)%gridstruct%agrid
+      fC => Atm(n)%gridstruct%fC
+      f0 => Atm(n)%gridstruct%f0
+
+      ! * Initialize coriolis param:
+
+      do j=jsd,jed+1
+        do i=isd,ied+1
+          fC(i,j) = 2.*omega*( -1.*cos(grid(i,j,1))*cos(grid(i,j,2))*sin(alpha) + &
+              sin(grid(i,j,2))*cos(alpha) )
+        enddo
+      enddo
+
+      do j=jsd,jed
+        do i=isd,ied
+          f0(i,j) = 2.*omega*( -1.*cos(agrid(i,j,1))*cos(agrid(i,j,2))*sin(alpha) + &
+              sin(agrid(i,j,2))*cos(alpha) )
+        enddo
+      enddo
+
+
+      !! Let this get reset in init_grid()/setup_aligned_nest()
+      !call fill_grid_from_supergrid(Atm(n)%grid_global, CORNER, fp_super_tile_geo, &
+      !    ioffset, joffset, x_refine, y_refine)
+
+      if (use_timers) call mpp_clock_end (id_reset1)
+      if (use_timers) call mpp_clock_begin (id_reset2)
+
+      ! TODO should these get reset by init_grid instead??
+      call fill_weight_grid(Atm(n)%neststruct%wt_h, wt_h)
+      call fill_weight_grid(Atm(n)%neststruct%wt_u, wt_u)
+      call fill_weight_grid(Atm(n)%neststruct%wt_v, wt_v)
+      ! TODO -- Seems like this is not used anywhere, other than being allocated, filled, deallocated
+      !call fill_weight_grid(Atm(n)%neststruct%wt_b, wt_b)
+
+      if (use_timers) call mpp_clock_end (id_reset2)
+
+    endif
+
+    if (use_timers) call mpp_clock_begin (id_reset3)
+
+    ! TODO Write clearer comments on what is happening here.
+
+    ! This code runs several communications steps:
+    !   1. On pe 0, it gets the global_grid domain setup
+    !   2. sends the global_grid to the other parent PEs
+    !   3. global_grid is received in call to setup_aligned_nest() in fv_grid_tools.F90::init_grid()
+    !   Other communication is contained fully within setup_aligned_nest().
+
+    ! Sends around data from the parent grids, and recomputes the update indices
+    ! This code copied from fv_control.F90
+    ! 
Need to SEND grid_global to any child grids; this is received in setup_aligned_nest in fv_grid_tools + ! if (Atm(pp)%neststruct%nested) then + + ! TODO phrase this more carefully to choose the parent master PE grid if we are operating in a nested setup. + ! Unlike in fv_control.F90, this will be running on Atm(1) when it's on pe=0, so we don't need to navigate to parent_grid. + + first_time = .false. + + ! Seems like we do not need to resend this -- setup_aligned_nest now saves the parent tile information during model initialization, + ! which happens before we enter the moving nest code. + if (this_pe .eq. 0 .and. first_time) then + + ! This is the Atm index for the nest values. + pp = child_grid_num + + refinement = x_refine + ng = Atm(n)%ng + + call mpp_get_global_domain( Atm(n)%domain, isg, ieg, jsg, jeg) + + !FIXME: Should replace this by generating the global grid (or at least one face thereof) on the + ! nested PEs instead of sending it around. + !if (gid == Atm(pp)%parent_grid%pelist(1)) then + + call mpp_send(Atm(n)%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile), & + size(Atm(n)%grid_global(isg-ng:ieg+1+ng,jsg-ng:jeg+1+ng,1:2,parent_tile)), & + Atm(pp)%pelist(1)) !send to p_ind in setup_aligned_nest + + call mpp_sync_self() + !endif + endif + + !if (ngrids > 1) call setup_update_regions ! Originally from fv_control.F90 + call mn_setup_update_regions(Atm, n, nest_domain) + + if (use_timers) call mpp_clock_end (id_reset3) + if (use_timers) call mpp_clock_begin (id_reset4) + + if (Atm(n)%neststruct%nested) then + ! New code from fv_control.F90 + ! call init_grid(Atm(this_grid), Atm(this_grid)%flagstruct%grid_name, Atm(this_grid)%flagstruct%grid_file, & + ! Atm(this_grid)%flagstruct%npx, Atm(this_grid)%flagstruct%npy, Atm(this_grid)%flagstruct%npz, Atm(this_grid)%flagstruct%ndims, Atm(this_grid)%flagstruct%ntiles, Atm(this_grid)%ng, tile_coarse) + + ! Atm(n)%neststruct%parent_tile = tile_coarse(n) + + ! Old Code + !call init_grid(Atm(n), Atm(n)%flagstruct%grid_name, Atm(n)%flagstruct%grid_file, & + ! Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, Atm(n)%ng) + + !tile_coarse(1) = Atm(n)%neststruct%parent_tile + tile_coarse(1) = parent_tile + tile_coarse(2) = parent_tile + + call init_grid(Atm(n), Atm(n)%flagstruct%grid_name, Atm(n)%flagstruct%grid_file, & + Atm(n)%flagstruct%npx, Atm(n)%flagstruct%npy, Atm(n)%flagstruct%npz, & + Atm(n)%flagstruct%ndims, Atm(n)%flagstruct%ntiles, Atm(n)%ng, tile_coarse) + endif + + if (use_timers) call mpp_clock_end (id_reset4) + if (use_timers) call mpp_clock_begin (id_reset5) + + ! Reset the gridstruct values for the nest + if (is_fine_pe) then + call grid_utils_init(Atm(n), Atm(n)%npx, Atm(n)%npy, Atm(n)%npz, & + Atm(n)%flagstruct%non_ortho, Atm(n)%flagstruct%grid_type, Atm(n)%flagstruct%c2l_ord) + endif + + if (use_timers) call mpp_clock_end (id_reset5) + if (use_timers) call mpp_clock_begin (id_reset6) + + !call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + ! Needs to run for parent and nest Atm(2) + ! Nest PEs update ind_update_h -- this now seems obsolete + ! Parent tile PEs update isu, ieu, jsu, jeu + ! Global tiles that are not parent have no changes + + ! Update: This is now accomplished with the earlier call to setup_update_regions() + !call reinit_parent_indices(Atm(2)) + !!call reinit_parent_indices(Atm(n)) + + ! Reallocate the halo buffers in the neststruct, as some are now the wrong size + ! 
Optimization would be to only deallocate the edges that have changed. + + ! TODO Write comments on the t0 and t1 buffers + if (use_timers) call mpp_clock_end (id_reset6) + if (use_timers) call mpp_clock_begin (id_reset7) + + if (is_fine_pe) then + !call reallocate_BC_buffers(Atm(child_grid_num)) + call reallocate_BC_buffers(Atm(1)) + + ! Reallocate buffers that are declared in fv_nesting.F90 + call dealloc_nested_buffers(Atm(1)) + + ! Set both to true so the call to setup_nested_grid_BCs() (at the beginning of fv_dynamics()) will reset t0 buffers + ! They will be returned to false by setup_nested_grid_BCs() + + Atm(n)%neststruct%first_step = .true. + !Atm(n)%flagstruct%make_nh= .true. + + !! Fill in the BC time1 buffers + !call setup_nested_grid_BCs(npx, npy, npz, zvir, ncnst, & + ! u, v, w, pt, delp, delz, q, uc, vc, pkz, & + ! neststruct%nested, flagstruct%inline_q, flagstruct%make_nh, ng, & + ! gridstruct, flagstruct, neststruct, & + ! neststruct%nest_timestep, neststruct%tracer_nest_timestep, & + ! domain, bd, nwat) + + ! Transfer the BC time1 buffers to time0 + + !call set_NH_BCs_t0(neststruct) + !call set_BCs_t0(ncnst, flagstruct%hydrostatic, neststruct) + + endif + if (use_timers) call mpp_clock_end (id_reset7) + + end subroutine mn_meta_reset_gridstruct + + + ! Copied and adapted from fv_control.F90::setup_update_regions(); where it is an internal subroutine + ! Modifications only to pass necessary variables as arguments + + !>@brief The subroutine 'mn_setup_update_regions' performs some of the tasks of fv_control.F90::setup_update_regions() for nest motion + !>@details This routine only updates indices, so is computationally efficient + subroutine mn_setup_update_regions(Atm, this_grid, nest_domain) + type(fv_atmos_type), allocatable, intent(INOUT) :: Atm(:) !< Array of atmospheric data + integer, intent(IN) :: this_grid !< Parent or child grid number + type(nest_domain_type), intent(in) :: nest_domain !< Nest domain structure + + integer :: isu, ieu, jsu, jeu ! update regions + integer :: isc, jsc, iec, jec + integer :: upoff + integer :: ngrids, n, nn + integer :: isu_stag, isv_stag, jsu_stag, jsv_stag + integer :: ieu_stag, iev_stag, jeu_stag, jev_stag + integer :: this_pe + + this_pe = mpp_pe() + + ! Need to get the following variables from nest_domain + ! tile_coarse() + ! icount_coarse() + ! from mpp_define_nest_domains.inc: iend_coarse(n) = istart_coarse(n) + icount_coarse(n) - 1 + ! rearrange to: iend_coarse(n) - istart_coarse(n) + 1 = icount_coarse(n) + ! jcount_coarse() + ! nest_ioffsets() + ! in fv_control.F90. pass nest_ioffsets as istart_coarse + ! nest_joffsets() + + isc = Atm(this_grid)%bd%isc + jsc = Atm(this_grid)%bd%jsc + iec = Atm(this_grid)%bd%iec + jec = Atm(this_grid)%bd%jec + + upoff = Atm(this_grid)%neststruct%upoff + + ngrids = size(Atm) + + do n=2,ngrids + nn = n - 1 ! TODO revise this to handle multiple nests. This adjusts to match fv_control.F90 where these + ! arrays are passed in to mpp_define_nest_domains with bounds (2:ngrids) + + ! Updated code from new fv_control.F90 November 8. 2021 Ramstrom + + if (nest_domain%tile_coarse(nn) == Atm(this_grid)%global_tile) then + + !isu = nest_ioffsets(n) + isu = nest_domain%istart_coarse(nn) + !ieu = isu + icount_coarse(n) - 1 + ieu = isu + (nest_domain%iend_coarse(nn) - nest_domain%istart_coarse(nn) + 1) - 1 + + !jsu = nest_joffsets(n) + jsu = nest_domain%jstart_coarse(nn) + !jeu = jsu + jcount_coarse(n) - 1 + jeu = jsu + (nest_domain%jend_coarse(nn) - nest_domain%jstart_coarse(nn) + 1) - 1 + +!!! 
Begin new
+          isu_stag = isu
+          jsu_stag = jsu
+          ieu_stag = ieu
+          jeu_stag = jeu+1
+
+          isv_stag = isu
+          jsv_stag = jsu
+          iev_stag = ieu+1
+          jev_stag = jeu
+!!! End new
+
+
+          !update offset adjustment
+          isu = isu + upoff
+          ieu = ieu - upoff
+          jsu = jsu + upoff
+          jeu = jeu - upoff
+
+!!! Begin new
+          isu_stag = isu_stag + upoff
+          ieu_stag = ieu_stag - upoff
+          jsu_stag = jsu_stag + upoff
+          jeu_stag = jeu_stag - upoff
+
+          isv_stag = isv_stag + upoff
+          iev_stag = iev_stag - upoff
+          jsv_stag = jsv_stag + upoff
+          jev_stag = jev_stag - upoff
+
+          ! Absolute boundary for the staggered point update region on the parent.
+          ! This is used in remap_uv to control the update of the last staggered point
+          ! when the update region coincides with a pe domain to avoid cross-restart repro issues
+
+          Atm(n)%neststruct%jeu_stag_boundary = jeu_stag
+          Atm(n)%neststruct%iev_stag_boundary = iev_stag
+
+          if (isu > iec .or. ieu < isc .or. &
+              jsu > jec .or. jeu < jsc ) then
+            isu = -999 ; jsu = -999 ; ieu = -1000 ; jeu = -1000
+          else
+            isu = max(isu,isc) ; jsu = max(jsu,jsc)
+            ieu = min(ieu,iec) ; jeu = min(jeu,jec)
+          endif
+
+          ! Update region for staggered quantity to avoid cross repro issues when the pe domain boundary
+          ! coincides with the nest. Basically write the staggered update on compute domains
+
+          if (isu_stag > iec .or. ieu_stag < isc .or. &
+              jsu_stag > jec .or. jeu_stag < jsc ) then
+            isu_stag = -999 ; jsu_stag = -999 ; ieu_stag = -1000 ; jeu_stag = -1000
+          else
+            isu_stag = max(isu_stag,isc) ; jsu_stag = max(jsu_stag,jsc)
+            ieu_stag = min(ieu_stag,iec) ; jeu_stag = min(jeu_stag,jec)
+          endif
+
+          if (isv_stag > iec .or. iev_stag < isc .or. &
+              jsv_stag > jec .or. jev_stag < jsc ) then
+            isv_stag = -999 ; jsv_stag = -999 ; iev_stag = -1000 ; jev_stag = -1000
+          else
+            isv_stag = max(isv_stag,isc) ; jsv_stag = max(jsv_stag,jsc)
+            iev_stag = min(iev_stag,iec) ; jev_stag = min(jev_stag,jec)
+          endif
+!!! End new
+
+          if (isu > iec .or. ieu < isc .or. &
+              jsu > jec .or. jeu < jsc ) then
+            isu = -999 ; jsu = -999 ; ieu = -1000 ; jeu = -1000
+          else
+            isu = max(isu,isc) ; jsu = max(jsu,jsc)
+            ieu = min(ieu,iec) ; jeu = min(jeu,jec)
+          endif
+
+          ! lump indices
+          isu=max(isu, isu_stag, isv_stag)
+          jsu=max(jsu, jsu_stag, jsv_stag)
+          jeu_stag=max(jeu, jeu_stag)
+          jev_stag=max(jeu, jev_stag)
+          ieu_stag=max(ieu ,ieu_stag)
+          iev_stag=max(ieu ,iev_stag)
+
+          Atm(n)%neststruct%isu = isu
+          Atm(n)%neststruct%ieu = ieu_stag
+          Atm(n)%neststruct%jsu = jsu
+          Atm(n)%neststruct%jeu = jev_stag
+
+          Atm(n)%neststruct%jeu_stag = jeu_stag
+          Atm(n)%neststruct%iev_stag = iev_stag
+      endif
+    enddo
+
+  end subroutine mn_setup_update_regions
+
+
+  !==================================================================================================
+  !
+  !  Recalculation Section -- Buffers that have to change size after nest motion
+  !
+  !==================================================================================================
+
+  !>@brief The subroutine 'reallocate_BC_buffers' reallocates boundary condition buffers - some need to change size after a nest move.
+  !>@details They were expected to be reallocated in boundary.F90's nested_grid_BC_recv() when needed, but they do not appear to be.
+  subroutine reallocate_BC_buffers(Atm)
+    type(fv_atmos_type), intent(inout) :: Atm !< Single instance of atmospheric data
+
+    integer :: n, ns
+    logical :: dummy = .false. ! 
same as grids_on_this_pe(n) + + call deallocate_fv_nest_BC_type(Atm%neststruct%delp_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%u_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%v_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%uc_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%vc_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%divg_BC) + + if (allocated(Atm%neststruct%q_BC)) then + do n=1,size(Atm%neststruct%q_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%q_BC(n)) + enddo + endif + +#ifndef SW_DYNAMICS + call deallocate_fv_nest_BC_type(Atm%neststruct%pt_BC) +#ifdef USE_COND + call deallocate_fv_nest_BC_type(Atm%neststruct%q_con_BC) +#ifdef MOIST_CAPPA + call deallocate_fv_nest_BC_type(Atm%neststruct%cappa_BC) +#endif +#endif + if (.not.Atm%flagstruct%hydrostatic) then + call deallocate_fv_nest_BC_type(Atm%neststruct%w_BC) + call deallocate_fv_nest_BC_type(Atm%neststruct%delz_BC) + endif +#endif + + ! Reallocate the buffers + + ns = Atm%neststruct%nsponge + + call allocate_fv_nest_BC_type(Atm%neststruct%delp_BC,Atm,ns,0,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%u_BC,Atm,ns,0,1,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%v_BC,Atm,ns,1,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%uc_BC,Atm,ns,1,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%vc_BC,Atm,ns,0,1,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%divg_BC,Atm,ns,1,1,dummy) + + ! if (ncnst > 0) then + ! allocate(Atm%neststruct%q_BC(ncnst)) + ! do n=1,ncnst + ! call allocate_fv_nest_BC_type(Atm%neststruct%q_BC(n),Atm,ns,0,0,dummy) + ! enddo + ! endif + + if (allocated(Atm%neststruct%q_BC)) then + do n=1,size(Atm%neststruct%q_BC) + call allocate_fv_nest_BC_type(Atm%neststruct%q_BC(n),Atm,ns,0,0,dummy) + enddo + endif + +#ifndef SW_DYNAMICS + call allocate_fv_nest_BC_type(Atm%neststruct%pt_BC,Atm,ns,0,0,dummy) +#ifdef USE_COND + call allocate_fv_nest_BC_type(Atm%neststruct%q_con_BC,Atm,ns,0,0,dummy) +#ifdef MOIST_CAPPA + call allocate_fv_nest_BC_type(Atm%neststruct%cappa_BC,Atm,ns,0,0,dummy) +#endif +#endif + if (.not.Atm%flagstruct%hydrostatic) then + call allocate_fv_nest_BC_type(Atm%neststruct%w_BC,Atm,ns,0,0,dummy) + call allocate_fv_nest_BC_type(Atm%neststruct%delz_BC,Atm,ns,0,0,dummy) + endif +#endif + + end subroutine reallocate_BC_buffers + + + !!============================================================================ + !! Step 8 -- Moving Nest Output to NetCDF + !!============================================================================ + + !>@brief The subroutine 'mn_prog_dump_to_netcdf' dumps selected prognostic variables to netCDF file. + !>@details Can be modified to output more of the prognostic variables if wanted. Certain 3D variables were commented out for performance. + subroutine mn_prog_dump_to_netcdf(Atm, time_val, file_prefix, is_fine_pe, domain_coarse, domain_fine, nz) + type(fv_atmos_type), intent(in) :: Atm !< Single instance of atmospheric data + integer, intent(in) :: time_val !< Timestep number + character(len=*), intent(in) :: file_prefix !< Filename prefix + logical, intent(in) :: is_fine_pe !< Is nest PE? 
+ type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures + integer, intent(in) :: nz !< Number of vertical levels + + integer :: n_moist + character(len=16) :: out_var_name + integer :: position = CENTER + !integer :: position_u = NORTH + !integer :: position_v = EAST + + call mn_var_dump_to_netcdf(Atm%pt , is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "tempK") + !call mn_var_dump_to_netcdf(Atm%pt(:,:,64) , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "T64") + !call mn_var_dump_to_netcdf(Atm%delp , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "DELP") + call mn_var_dump_to_netcdf(Atm%delz , is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "DELZ") + call mn_var_dump_to_netcdf(Atm%q_con, is_fine_pe, domain_coarse, domain_fine, position, nz, & + time_val, Atm%global_tile, file_prefix, "qcon") + + !call mn_var_dump_to_netcdf(Atm%w , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "WWND") + !call mn_var_dump_to_netcdf(Atm%ua , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "UA") + !call mn_var_dump_to_netcdf(Atm%va , is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, "VA") + + call mn_var_dump_to_netcdf(Atm%ps , is_fine_pe, domain_coarse, domain_fine, position, & + time_val, Atm%global_tile, file_prefix, "PS") + + !! TODO figure out what to do with ze0; different bounds - only compute domain + + !! TODO Wind worked fine when in its own file. Can it merge in with the regular file?? + !!call mn_var_dump_to_netcdf(Atm%u, is_fine_pe, domain_coarse, domain_fine, position_u, nz, & + !! time_val, Atm%global_tile, "wxvarU", "UWND") + !!call mn_var_dump_to_netcdf(Atm%v, is_fine_pe, domain_coarse, domain_fine, position_v, nz, & + !! time_val, Atm%global_tile, "wxvarU", "VWND") + + ! Latitude and longitude in radians + call mn_var_dump_to_netcdf( Atm%gridstruct%agrid(:,:,2), is_fine_pe, domain_coarse, domain_fine, position, & + time_val, Atm%global_tile, file_prefix, "latrad") + call mn_var_dump_to_netcdf( Atm%gridstruct%agrid(:,:,1), is_fine_pe, domain_coarse, domain_fine, position, & + time_val, Atm%global_tile, file_prefix, "lonrad") + + !do n_moist = lbound(Atm%q, 4), ubound(Atm%q, 4) + ! call get_tracer_names(MODEL_ATMOS, n_moist, out_var_name) + ! call mn_var_dump_to_netcdf( Atm%q(:,:,:,n_moist), is_fine_pe, domain_coarse, domain_fine, position, nz, & + ! time_val, Atm%global_tile, file_prefix, trim(out_var_name)) + !enddo + + end subroutine mn_prog_dump_to_netcdf + + + !! Step 8 -- Moving Nest Output Individual Variables + + !>@brief The subroutine 'mn_var_dump_3d_to_netcdf' dumps a 3D single precision variable to netCDF file. + subroutine mn_var_dump_3d_to_netcdf( data_var, is_fine_pe, domain_coarse, domain_fine, position, nz, time_step, this_tile, file_prefix, var_name) + real, intent(in) :: data_var(:,:,:) !< Single precision model variable + logical, intent(in) :: is_fine_pe !< Is nest PE? 
+    type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures
+    integer, intent(in) :: position, nz, time_step, this_tile !< Stagger, number of vertical levels, timestep, tile number
+    character(len=*) :: file_prefix, var_name !< Filename prefix, and netCDF variable name
+
+    integer :: isd_coarse, ied_coarse, jsd_coarse, jed_coarse
+    integer :: isd_fine, ied_fine, jsd_fine, jed_fine
+    integer :: this_pe
+    character(len=64) :: prefix_fine, prefix_coarse
+
+    this_pe = mpp_pe()
+
+    prefix_fine = trim(file_prefix) // "_fine"
+    prefix_coarse = trim(file_prefix) // "_coarse"
+
+    !!===========================================================
+    !!
+    !!  Output the grid data from both nest grids and parent grids to netCDF
+    !!
+    !!===========================================================
+
+    if (is_fine_pe) then
+      call mpp_get_data_domain(domain_fine, isd_fine, ied_fine, jsd_fine, jed_fine, position=position)
+
+      call output_grid_to_nc("GH", isd_fine, ied_fine, jsd_fine, jed_fine, nz, data_var, prefix_fine, var_name, time_step, domain_fine, position)
+
+    else
+      if (this_tile == 6) then
+        !call mpp_get_compute_domain(domain_coarse, isc_coarse, iec_coarse, jsc_coarse, jec_coarse, position=position)
+        call mpp_get_data_domain(domain_coarse, isd_coarse, ied_coarse, jsd_coarse, jed_coarse, position=position)
+        !call mpp_get_memory_domain(domain_coarse, ism_coarse, iem_coarse, jsm_coarse, jem_coarse, position=position)
+
+        call output_grid_to_nc("GH", isd_coarse, ied_coarse, jsd_coarse, jed_coarse, nz, data_var, prefix_coarse, var_name, time_step, domain_coarse, position)
+
+      endif
+    endif
+
+  end subroutine mn_var_dump_3d_to_netcdf
+
+  !>@brief The subroutine 'mn_var_dump_2d_to_netcdf' dumps a 2D single precision variable to netCDF file.
+  subroutine mn_var_dump_2d_to_netcdf( data_var, is_fine_pe, domain_coarse, domain_fine, position, time_step, this_tile, file_prefix, var_name)
+    implicit none
+    real, intent(in) :: data_var(:,:) !< Data variable
+    logical, intent(in) :: is_fine_pe !< Is nest PE?
+    type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures
+    integer, intent(in) :: position, time_step, this_tile !< Stagger, timestep, tile number
+    character(len=*) :: file_prefix, var_name !< Filename prefix, and netCDF variable name
+
+    !integer :: isc_coarse, iec_coarse, jsc_coarse, jec_coarse
+    !integer :: isc_fine, iec_fine, jsc_fine, jec_fine
+    !integer :: ism_coarse, iem_coarse, jsm_coarse, jem_coarse
+    !integer :: ism_fine, iem_fine, jsm_fine, jem_fine
+
+    integer :: isd_fine, ied_fine, jsd_fine, jed_fine
+    integer :: isd_coarse, ied_coarse, jsd_coarse, jed_coarse
+    integer :: this_pe
+    character(len=64) :: prefix_fine, prefix_coarse
+
+    this_pe = mpp_pe()
+
+    prefix_fine = trim(file_prefix) // "_fine"
+    prefix_coarse = trim(file_prefix) // "_coarse"
+
+    !!===========================================================
+    !!
+    !!  Output the grid data from both nest grids and parent grids to netCDF
+    !!
+    !!===========================================================
+
+    if (is_fine_pe) then
+      ! Maybe don't need to call mpp_get_compute_domain here? 
+ !call mpp_get_compute_domain(domain_fine, isc_fine, iec_fine, jsc_fine, jec_fine, position=position) + call mpp_get_data_domain(domain_fine, isd_fine, ied_fine, jsd_fine, jed_fine, position=position) + !call mpp_get_memory_domain(domain_fine, ism_fine, iem_fine, jsm_fine, jem_fine, position=position) + + call output_grid_to_nc("GH", isd_fine, ied_fine, jsd_fine, jed_fine, data_var, prefix_fine, var_name, time_step, domain_fine, position) + else + + if (this_tile == 6) then + !call mpp_get_compute_domain(domain_coarse, isc_coarse, iec_coarse, jsc_coarse, jec_coarse, position=position) + call mpp_get_data_domain(domain_coarse, isd_coarse, ied_coarse, jsd_coarse, jed_coarse, position=position) + !call mpp_get_memory_domain(domain_coarse, ism_coarse, iem_coarse, jsm_coarse, jem_coarse, position=position) + + call output_grid_to_nc("GH", isd_coarse, ied_coarse, jsd_coarse, jed_coarse, data_var, prefix_coarse, var_name, time_step, domain_coarse, position) + + endif + endif + + end subroutine mn_var_dump_2d_to_netcdf + + + !!========================================================================================= + !! Step 9 -- Perform vertical remapping on nest(s) and recalculate auxiliary pressures + !! Should help stabilize the fields before dynamics runs + !!========================================================================================= + + !>@brief The subroutine 'recalc_aux_pressures' updates auxiliary pressures after a nest move. + subroutine recalc_aux_pressures(Atm) + type(fv_atmos_type), intent(inout) :: Atm !< Single Atm structure + + ! Update the auxiliary pressure variables + ! In nest moving code, we moved delp and delz; this will update ps, pk, pe, peln, and pkz + ! Note this routine makes hydrostatic calculations (but has non-hydrostatic branches) + ! Perhaps not appropriate for a non-hydrostatic run. + ! May need to find or write a non-hydrostatic version of this routine + + ! TODO determine if this is the correct way to recalculate the auxiliary pressure variables + + call p_var(Atm%npz, Atm%bd%is, Atm%bd%ie, Atm%bd%js, Atm%bd%je, Atm%ptop, ptop_min, & + Atm%delp, Atm%delz, & + Atm%pt, Atm%ps, & + Atm%pe, Atm%peln, & + Atm%pk, Atm%pkz, kappa, & + Atm%q, Atm%ng, Atm%flagstruct%ncnst, Atm%gridstruct%area_64, 0., & + .false., .false., & !mountain argument not used + Atm%flagstruct%moist_phys, Atm%flagstruct%hydrostatic, & + Atm%flagstruct%nwat, Atm%domain, .false.) + + end subroutine recalc_aux_pressures + + + !================================================================================================== + ! + ! Utility Section -- After Step 9 + ! + !================================================================================================== + + !>@brief The subroutine 'init_ijk_mem' was copied from dyn_core.F90 to avoid circular dependencies + subroutine init_ijk_mem(i1, i2, j1, j2, km, array, var) + integer, intent(in):: i1, i2, j1, j2, km + real, intent(inout):: array(i1:i2,j1:j2,km) + real, intent(in):: var + integer:: i, j, k + + !$OMP parallel do default(none) shared(i1,i2,j1,j2,km,array,var) + do k=1,km + do j=j1,j2 + do i=i1,i2 + array(i,j,k) = var + enddo + enddo + enddo + + end subroutine init_ijk_mem + + !>@brief The function 'almost_equal' tests whether real values are within a tolerance of one another. + function almost_equal(a, b) + logical :: almost_equal + real, intent(in):: a,b + + real :: tolerance = 0.00001 + + if ( abs(a - b) < tolerance ) then + almost_equal = .true. + else + almost_equal = .false. 
+ endif + end function almost_equal + + + + !>@brief The subroutine 'move_nest_geo' shifts tile_geo values using the data from fp_super_tile_geo + subroutine move_nest_geo(Atm, n, tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, delta_i_c, delta_j_c, x_refine, y_refine) + implicit none + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) !< Atm data array + integer, intent(in) :: n !< Grid numbers + type(grid_geometry), intent(inout) :: tile_geo !< A-grid tile geometry + type(grid_geometry), intent(inout) :: tile_geo_u !< u-wind tile geometry + type(grid_geometry), intent(inout) :: tile_geo_v !< v-wind tile geometry + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Parent high-resolution supergrid tile geometry + integer, intent(in) :: delta_i_c, delta_j_c, x_refine, y_refine !< delta i,j for nest move. Nest refinement. + + integer :: nest_x, nest_y, parent_x, parent_y + type(bbox) :: tile_bbox, fp_tile_bbox, tile_bbox_u, tile_bbox_v + integer :: i, j, fp_i, fp_j + integer :: this_pe + logical :: found + character(len=48) :: errstring + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + this_pe = mpp_pe() + + call fill_bbox(tile_bbox, tile_geo%lats) + call fill_bbox(tile_bbox_u, tile_geo_u%lats) + call fill_bbox(tile_bbox_v, tile_geo_v%lats) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + !! Calculate new parent alignment -- supergrid at the refine ratio + !! delta_{i,j}_c are at the coarse center grid resolution + !parent_x = parent_x + delta_i_c * 2 * x_refine + !parent_y = parent_y + delta_j_c * 2 * y_refine + + call calc_nest_alignment(Atm, n, nest_x, nest_y, parent_x, parent_y) + + ! Brute force repopulation of full tile_geo grids. + ! Optimization would be to use EOSHIFT and bring in just leading edge + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_i=", fp_i," is=",fp_tile_bbox%is," ie=",fp_tile_bbox%ie + call mpp_error(FATAL, "move_nest_geo invalid bounds tile_geo i: " // errstring) + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_j=", fp_j," js=",fp_tile_bbox%js," je=",fp_tile_bbox%je + call mpp_error(FATAL, "move_nest_geo invalid bounds tile_geo j " // errstring) + endif + + tile_geo%lats(i,j) = fp_super_tile_geo%lats(fp_i, fp_j) + tile_geo%lons(i,j) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + do i = tile_bbox_u%is, tile_bbox_u%ie + do j = tile_bbox_u%js, tile_bbox_u%je + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y - 1 + + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_i=", fp_i," is=",fp_tile_bbox%is," ie=",fp_tile_bbox%ie + call mpp_error(FATAL, "move_nest_geo invalid bounds tile_geo_u i " // errstring) + endif + if (fp_j < fp_tile_bbox%js .or. 
fp_j > fp_tile_bbox%je) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_j=", fp_j," js=",fp_tile_bbox%js," je=",fp_tile_bbox%je + call mpp_error(FATAL, "move_nest_geo invalid bounds tile_geo_u j " // errstring) + endif + + tile_geo_u%lats(i,j) = fp_super_tile_geo%lats(fp_i, fp_j) + tile_geo_u%lons(i,j) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + do i = tile_bbox_v%is, tile_bbox_v%ie + do j = tile_bbox_v%js, tile_bbox_v%je + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y + + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_i=", fp_i," is=",fp_tile_bbox%is," ie=",fp_tile_bbox%ie + call mpp_error(FATAL, "move_nest_geo invalid bounds tile_geo_v i " // errstring) + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_j=", fp_j," js=",fp_tile_bbox%js," je=",fp_tile_bbox%je + call mpp_error(FATAL, "move_nest_geo invalid bounds tile_geo_v j " // errstring) + endif + + tile_geo_v%lats(i,j) = fp_super_tile_geo%lats(fp_i, fp_j) + tile_geo_v%lons(i,j) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + call check_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y, found) + + end subroutine move_nest_geo + + !>@brief The subroutine 'assign_n_p_grids' sets values for parent and nest grid arrays from the grid_geometry structures. + subroutine assign_n_p_grids(parent_geo, tile_geo, p_grid, n_grid, position) + type(grid_geometry), intent(in) :: parent_geo, tile_geo !< Parent geometry, nest geometry + real(kind=R_GRID), allocatable, intent(inout) :: p_grid(:,:,:) !< Parent grid + real(kind=R_GRID), allocatable, intent(inout) :: n_grid(:,:,:) !< Nest grid + integer, intent(in) :: position !< Grid offset + + integer :: i,j + + if (position == CENTER) then + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + enddo + enddo + + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i, 2*j) + p_grid(i, j, 2) = parent_geo%lats(2*i, 2*j) + enddo + enddo + + ! u(npx, npy+1) + elseif (position == NORTH) then ! u wind on D-stagger + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + enddo + enddo + + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i, 2*j-1) + p_grid(i, j, 2) = parent_geo%lats(2*i, 2*j-1) + enddo + enddo + + ! v(npx+1, npy) + elseif (position == EAST) then ! v wind on D-stagger + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + enddo + enddo + + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i-1, 2*j) + p_grid(i, j, 2) = parent_geo%lats(2*i-1, 2*j) + enddo + enddo + + endif + + end subroutine assign_n_p_grids + + !>@brief The subroutine 'assign_p_grids' sets values for parent grid arrays from the grid_geometry structures. This is static through the model run. 
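+  !>@details parent_geo is a supergrid, so A-grid cell centers are sampled at even supergrid indices (2*i, 2*j),
+  !! while the D-grid u and v staggers sample (2*i, 2*j-1) and (2*i-1, 2*j) respectively.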
+ subroutine assign_p_grids(parent_geo, p_grid, position) + type(grid_geometry), intent(in) :: parent_geo !< Parent geometry + real(kind=R_GRID), allocatable, intent(inout) :: p_grid(:,:,:) !< Parent grid + integer, intent(in) :: position !< Grid offset + + integer :: i,j + + if (position == CENTER) then + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i, 2*j) + p_grid(i, j, 2) = parent_geo%lats(2*i, 2*j) + enddo + enddo + + ! u(npx, npy+1) + elseif (position == NORTH) then ! u wind on D-stagger + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i, 2*j-1) + p_grid(i, j, 2) = parent_geo%lats(2*i, 2*j-1) + enddo + enddo + + ! v(npx+1, npy) + elseif (position == EAST) then ! v wind on D-stagger + do j = 1, parent_geo%ny + do i = 1, parent_geo%nx + ! centered grid version + p_grid(i, j, 1) = parent_geo%lons(2*i-1, 2*j) + p_grid(i, j, 2) = parent_geo%lats(2*i-1, 2*j) + enddo + enddo + endif + + end subroutine assign_p_grids + + + + !>@brief The subroutine 'assign_n_grids' sets values for nest grid arrays from the grid_geometry structures. + subroutine assign_n_grids(tile_geo, n_grid, position) + type(grid_geometry), intent(in) :: tile_geo !< Parent geometry, nest geometry + real(kind=R_GRID), allocatable, intent(inout) :: n_grid(:,:,:) !< Nest grid + integer, intent(in) :: position !< Grid offset + + integer :: i,j + + if (position == CENTER) then + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + enddo + enddo + + ! u(npx, npy+1) + elseif (position == NORTH) then ! u wind on D-stagger + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + enddo + enddo + + ! v(npx+1, npy) + elseif (position == EAST) then ! v wind on D-stagger + do j = lbound(tile_geo%lats,2), ubound(tile_geo%lats,2) + do i = lbound(tile_geo%lats,1), ubound(tile_geo%lats,1) + ! centered grid version + n_grid(i, j, 1) = tile_geo%lons(i, j) + n_grid(i, j, 2) = tile_geo%lats(i, j) + enddo + enddo + + endif + + end subroutine assign_n_grids + + + + + !>@brief The subroutine 'calc_nest_halo_weights' calculates the interpolation weights + !>@details Computationally demanding; target for optimization after nest moves + subroutine calc_nest_halo_weights(bbox_fine, bbox_coarse, p_grid, n_grid, wt, istart_coarse, jstart_coarse, x_refine, y_refine) + implicit none + + type(bbox), intent(in) :: bbox_coarse, bbox_fine !< Bounding boxes of parent and nest + real(kind=R_GRID), allocatable, intent(in) :: p_grid(:,:,:), n_grid(:,:,:) !< Latlon rids of parent and nest in radians + real, allocatable, intent(inout) :: wt(:,:,:) !< Interpolation weight array + integer, intent(in) :: istart_coarse, jstart_coarse, x_refine, y_refine !< Offsets and nest refinements + + integer :: i,j, ic, jc + real :: dist1, dist2, dist3, dist4, sum + logical :: verbose = .false. + !logical :: verbose = .true. + + integer :: this_pe + + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: pi180 + real :: rad2deg, deg2rad + + pi180 = pi / 180.0 + deg2rad = pi / 180.0 + rad2deg = 1.0 / pi180 + + this_pe = mpp_pe() + + if ( bbox_coarse%is == 0 .and. bbox_coarse%ie == -1 ) then + ! 
Skip this one + ; + else + ! Calculate the bounding parent grid points for the nest grid point + ! Rely on the nest being aligned + ! code is from $CUBE/tools/fv_grid_tools.F90 + ! + + do j = bbox_fine%js, bbox_fine%je + ! F90 integer division truncates + jc = jstart_coarse + (j + y_refine/2 + 1) / y_refine + do i = bbox_fine%is, bbox_fine%ie + ic = istart_coarse + (i + x_refine/2 + 1) / x_refine + + ! dist2side_latlon takes points in longitude-latitude coordinates. + dist1 = dist2side_latlon(p_grid(ic,jc,:), p_grid(ic,jc+1,:), n_grid(i,j,:)) + dist2 = dist2side_latlon(p_grid(ic,jc+1,:), p_grid(ic+1,jc+1,:), n_grid(i,j,:)) + dist3 = dist2side_latlon(p_grid(ic+1,jc+1,:), p_grid(ic+1,jc,:), n_grid(i,j,:)) + dist4 = dist2side_latlon(p_grid(ic,jc,:), p_grid(ic+1,jc,:), n_grid(i,j,:)) + + wt(i,j,1)=dist2*dist3 ! ic, jc weight + wt(i,j,2)=dist3*dist4 ! ic, jc+1 weight + wt(i,j,3)=dist4*dist1 ! ic+1, jc+1 weight + wt(i,j,4)=dist1*dist2 ! ic+1, jc weight + + sum=wt(i,j,1)+wt(i,j,2)+wt(i,j,3)+wt(i,j,4) + wt(i,j,:)=wt(i,j,:)/sum + + enddo + enddo + endif + + end subroutine calc_nest_halo_weights + +end module fv_moving_nest_mod + diff --git a/moving_nest/fv_moving_nest_main.F90 b/moving_nest/fv_moving_nest_main.F90 new file mode 100644 index 000000000..c848e313f --- /dev/null +++ b/moving_nest/fv_moving_nest_main.F90 @@ -0,0 +1,1147 @@ +!*********************************************************************** +!* GNU General Public License * +!* This file is a part of fvGFS. * +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides top-level interface for moving nest functionality +!! @author W. Ramstrom, AOML/HRD 05/27/2021 +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + +module fv_moving_nest_main_mod + +#include + + !----------------- + ! 
FMS modules: + !----------------- + use block_control_mod, only: block_control_type + use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks + use time_manager_mod, only: time_type, get_time, get_date, set_time, operator(+), & + operator(-), operator(/), time_type_to_real + use fms_mod, only: file_exist, open_namelist_file, & + close_file, error_mesg, FATAL, & + check_nml_error, stdlog, & + write_version_number, & + mpp_clock_id, mpp_clock_begin, & + mpp_clock_end, CLOCK_SUBCOMPONENT, & + clock_flag_default + use mpp_mod, only: mpp_error, stdout, FATAL, WARNING, NOTE, & + input_nml_file, mpp_root_pe, & + mpp_npes, mpp_pe, mpp_chksum, & + mpp_get_current_pelist, & + mpp_set_current_pelist, mpp_sync + use mpp_parameter_mod, only: EUPDATE, WUPDATE, SUPDATE, NUPDATE + use mpp_domains_mod, only: domain2d, mpp_update_domains + use xgrid_mod, only: grid_box_type + use field_manager_mod, only: MODEL_ATMOS + use tracer_manager_mod, only: get_tracer_index, get_number_tracers, & + NO_TRACER, get_tracer_names + use DYCORE_typedefs, only: DYCORE_data_type +#ifdef GFS_TYPES + use GFS_typedefs, only: IPD_data_type => GFS_data_type, & + IPD_control_type => GFS_control_type, kind_phys +#else + use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys +#endif + + use fv_iau_mod, only: IAU_external_data_type +#ifdef MULTI_GASES + use multi_gases_mod, only: virq, virq_max, num_gas, ri, cpi +#endif + + !----------------- + ! FV core modules: + !----------------- + use atmosphere_mod, only: Atm, mygrid, p_split, dt_atmos + use fv_arrays_mod, only: fv_atmos_type, R_GRID, fv_grid_bounds_type, phys_diag_type + use fv_control_mod, only: ngrids + use fv_diagnostics_mod, only: fv_diag_init, fv_diag_reinit, fv_diag, fv_time, prt_maxmin, prt_height + use fv_restart_mod, only: fv_restart, fv_write_restart + use fv_timing_mod, only: timing_on, timing_off + use fv_mp_mod, only: is_master + use fv_regional_mod, only: start_regional_restart, read_new_bc_data, a_step, p_step, current_time_in_seconds + + !----------------------------------------- + ! External routines + !----------------------------------------- + use mpp_domains_mod, only: NORTH, NORTH_EAST, EAST, SOUTH_EAST, CORNER, CENTER + use mpp_domains_mod, only: nest_domain_type + use mpp_mod, only: mpp_sync, mpp_exit + use mpp_domains_mod, only: mpp_get_global_domain + use mpp_mod, only: mpp_send, mpp_sync_self, mpp_broadcast + + use fv_mp_mod, only: global_nest_domain + + use tracer_manager_mod, only: get_tracer_names + use field_manager_mod, only: MODEL_ATMOS + use fv_io_mod, only: fv_io_exit + !!use fv_restart_mod, only: d2c_setup + + !------------------------------------ + ! Moving Nest Routines + !------------------------------------ + + use fv_moving_nest_types_mod, only: allocate_fv_moving_nest_prog_type, allocate_fv_moving_nest_physics_type + use fv_moving_nest_types_mod, only: deallocate_fv_moving_nests + use fv_moving_nest_types_mod, only: Moving_nest + + ! Prognostic variable routines + use fv_moving_nest_mod, only: mn_prog_fill_intern_nest_halos, mn_prog_fill_nest_halos_from_parent, & + mn_prog_dump_to_netcdf, mn_prog_shift_data + ! Physics variable routines + use fv_moving_nest_physics_mod, only: mn_phys_fill_intern_nest_halos, mn_phys_fill_nest_halos_from_parent, & + mn_phys_dump_to_netcdf, mn_phys_shift_data, mn_phys_reset_sfc_props, move_nsst + + ! Metadata routines + use fv_moving_nest_mod, only: mn_meta_move_nest, mn_meta_recalc, mn_meta_reset_gridstruct, mn_shift_index + + ! 
Temporary variable routines (delz) + use fv_moving_nest_mod, only: mn_prog_fill_temp_variables, mn_prog_apply_temp_variables + use fv_moving_nest_physics_mod, only: mn_phys_fill_temp_variables, mn_phys_apply_temp_variables + + ! Load static datasets + use fv_moving_nest_mod, only: mn_latlon_read_hires_parent, mn_latlon_load_parent + use fv_moving_nest_mod, only: mn_orog_read_hires_parent, mn_static_read_hires + use fv_moving_nest_utils_mod, only: set_smooth_nest_terrain, set_blended_terrain + + use fv_moving_nest_physics_mod, only: mn_reset_phys_latlon, mn_surface_grids + + ! Grid reset routines + use fv_moving_nest_mod, only: grid_geometry + use fv_moving_nest_utils_mod, only: fill_grid_from_supergrid, fill_weight_grid + + ! Physics moving logical variables + use fv_moving_nest_physics_mod, only: move_physics, move_nsst + + ! Recalculation routines + use fv_moving_nest_mod, only: reallocate_BC_buffers, recalc_aux_pressures + + use fv_tracker_mod, only: Tracker, allocate_tracker, fv_tracker_init, deallocate_tracker + + implicit none + + !----------------------------------------------------------------------- + ! version number of this module + ! Include variable "version" to be written to log file. +#include + character(len=20) :: mod_name = 'fvGFS/fv_moving_nest_main_mod' + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + + ! Enable these for more debugging outputs + logical :: debug_log = .false. ! Produces logging to out.* file + logical :: tsvar_out = .false. ! Produces netCDF outputs; be careful to not exceed file number limits set in namelist + + ! --- Clock ids for moving_nest performance metering + integer :: id_movnest1, id_movnest1_9, id_movnest2, id_movnest3, id_movnest4, id_movnest5 + integer :: id_movnest5_1, id_movnest5_2, id_movnest5_3, id_movnest5_4 + integer :: id_movnest6, id_movnest7_0, id_movnest7_1, id_movnest7_2, id_movnest7_3, id_movnest8, id_movnest9 + integer :: id_movnestTot + integer, save :: output_step = 0 + +contains + + !>@brief The subroutine 'update_moving_nest' decides whether the nest should be moved, and if so, performs the move. + !>@details This subroutine evaluates the automatic storm tracker (or prescribed motion configuration), then decides + !! if the nest should be moved. If it should be moved, it calls fv_moving_nest_exec() to perform the nest move. + subroutine update_moving_nest(Atm_block, IPD_control, IPD_data, time_step) + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + type(time_type), intent(in) :: time_step !< Current timestep + + logical :: do_move + integer :: delta_i_c, delta_j_c + integer :: parent_grid_num, child_grid_num, nest_num + integer, allocatable :: global_pelist(:) + integer :: n + integer :: this_pe + + this_pe = mpp_pe() + + do_move = .false. + + ! dt_atmos was initialized in atmosphere.F90::atmosphere_init() + + n = mygrid ! Public variable from atmosphere.F90 + + ! Hard-coded for now - these will need to be looked up on each PE when multiple and telescoped nests are enabled. 
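+    ! In the current single-moving-nest configuration, Atm(1) is the parent grid and Atm(2) is the only
+    ! (and therefore moving) nest, so the grid and nest numbers can be set directly here.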
+ parent_grid_num = 1 + child_grid_num = 2 + nest_num = 1 + + call eval_move_nest(Atm, a_step, parent_grid_num, child_grid_num, do_move, delta_i_c, delta_j_c, dt_atmos) + + allocate(global_pelist(Atm(parent_grid_num)%npes_this_grid+Atm(child_grid_num)%npes_this_grid)) + global_pelist=(/Atm(parent_grid_num)%pelist, Atm(child_grid_num)%pelist/) + + call mpp_set_current_pelist(global_pelist) + call mpp_broadcast( delta_i_c, Atm(child_grid_num)%pelist(1), global_pelist ) + call mpp_broadcast( delta_j_c, Atm(child_grid_num)%pelist(1), global_pelist ) + call mpp_broadcast( do_move, Atm(child_grid_num)%pelist(1), global_pelist ) + call mpp_set_current_pelist(Atm(n)%pelist) + + if (do_move) then + call fv_moving_nest_exec(Atm, Atm_block, IPD_control, IPD_data, delta_i_c, delta_j_c, n, nest_num, parent_grid_num, child_grid_num, dt_atmos) + endif + + end subroutine update_moving_nest + + + + subroutine moving_nest_end() + integer :: n + + call deallocate_fv_moving_nests(ngrids) + + ! From fv_grid_utils.F90 + n = mygrid + + deallocate ( Atm(n)%gridstruct%area_c_64 ) + deallocate ( Atm(n)%gridstruct%dxa_64 ) + deallocate ( Atm(n)%gridstruct%dya_64 ) + deallocate ( Atm(n)%gridstruct%dxc_64 ) + deallocate ( Atm(n)%gridstruct%dyc_64 ) + deallocate ( Atm(n)%gridstruct%cosa_64 ) + deallocate ( Atm(n)%gridstruct%sina_64 ) + + end subroutine moving_nest_end + + + ! This subroutine sits in this file to have access to Atm structure + subroutine nest_tracker_init() + call fv_tracker_init(size(Atm)) + + if (mygrid .eq. 2) call allocate_tracker(mygrid, Atm(mygrid)%bd%isc, Atm(mygrid)%bd%iec, Atm(mygrid)%bd%jsc, Atm(mygrid)%bd%jec) + end subroutine nest_tracker_init + + subroutine nest_tracker_end() + call deallocate_tracker(ngrids) + end subroutine nest_tracker_end + + + + !>@brief The subroutine 'dump_moving_nest' outputs native grid format data to netCDF files + !>@details This subroutine exports model variables using FMS IO to netCDF files if tsvar_out is set to .True. + subroutine dump_moving_nest(Atm_block, IPD_control, IPD_data, time_step) + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(in) :: IPD_data(:) !< Physics variable data + type(time_type), intent(in) :: time_step !< Current timestep + + type(domain2d), pointer :: domain_coarse, domain_fine + logical :: is_fine_pe + integer :: parent_grid_num, child_grid_num, nz, this_pe, n + + this_pe = mpp_pe() + n = mygrid + + parent_grid_num = 1 + child_grid_num = 2 + + domain_fine => Atm(child_grid_num)%domain + domain_coarse => Atm(parent_grid_num)%domain + is_fine_pe = Atm(n)%neststruct%nested .and. ANY(Atm(n)%pelist(:) == this_pe) + nz = Atm(n)%npz + + ! Enable this to dump debug netCDF files. Files are automatically closed when dumped. + !if (mod(a_step, 80) .eq. 0 ) then + ! if (tsvar_out) call mn_prog_dump_to_netcdf(Atm(n), a_step, "tsavar", is_fine_pe, domain_coarse, domain_fine, nz) + ! if (tsvar_out) call mn_phys_dump_to_netcdf(Atm(n), Atm_block, IPD_control, IPD_data, a_step, "tsavar", is_fine_pe, domain_coarse, domain_fine, nz) + !endif + + end subroutine dump_moving_nest + + !>@brief The subroutine 'fv_moving_nest_init_clocks' intializes performance profiling timers of sections of the moving nest code. + !>@details Starts timers for subcomponents of moving nest code to determine performance. mpp routines group them into separate + !! sections for parent and nest PEs. 
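+  !! Only the per-step clocks are conditional on use_timers; the total-time clock (id_movnestTot) is always registered.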
+ subroutine fv_moving_nest_init_clocks(use_timers) + logical, intent(in) :: use_timers + + ! --- initialize clocks for moving_nest + if (use_timers) then + id_movnest1 = mpp_clock_id ('MN Part 1 Init', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest1_9 = mpp_clock_id ('MN Part 1.9 Copy delz', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest2 = mpp_clock_id ('MN Part 2 Fill Halos from Parent', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest3 = mpp_clock_id ('MN Part 3 Meta Move Nest', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest4 = mpp_clock_id ('MN Part 4 Fill Intern Nest Halos', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest5 = mpp_clock_id ('MN Part 5 Recalc Weights', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest5_1 = mpp_clock_id ('MN Part 5.1 read_parent', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest5_2 = mpp_clock_id ('MN Part 5.2 reset latlon', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest5_3 = mpp_clock_id ('MN Part 5.3 meta recalc', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest5_4 = mpp_clock_id ('MN Part 5.4 shift indx', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + id_movnest6 = mpp_clock_id ('MN Part 6 EOSHIFT', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + id_movnest7_0 = mpp_clock_id ('MN Part 7.0 Recalc gridstruct', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest7_1 = mpp_clock_id ('MN Part 7.1 Refill halos from Parent', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest7_2 = mpp_clock_id ('MN Part 7.2 Refill Intern Nest Halos', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest7_3 = mpp_clock_id ('MN Part 7.3 Fill delz', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + id_movnest8 = mpp_clock_id ('MN Part 8 Dump to netCDF', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + id_movnest9 = mpp_clock_id ('MN Part 9 Aux Pressure', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + endif + + id_movnestTot = mpp_clock_id ('Moving Nest Total', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + end subroutine fv_moving_nest_init_clocks + + !>@brief The subroutine 'eval_move_nest' determines whether the nest should be moved and in which direction. + !>@details This subroutine can execute prescribed motion or automated storm tracking based on namelist options. + subroutine eval_move_nest(Atm, a_step, parent_grid_num, child_grid_num, do_move, delta_i_c, delta_j_c, dt_atmos) + type(fv_atmos_type), intent(inout) :: Atm(:) !< Input atmospheric data + integer, intent(in) :: a_step !< Timestep + integer, intent(in) :: parent_grid_num, child_grid_num !< Grid numbers of parent and child + logical, intent(out) :: do_move !< Logical for whether to move nest + integer, intent(out) :: delta_i_c, delta_j_c !< Each can be -1, 0, or +1 + real, intent(in) :: dt_atmos !< only needed for the simple version of this subroutine + + integer :: n + integer :: cx, cy + real :: xdiff, ydiff + integer :: nest_i_c, nest_j_c + integer :: nis, nie, njs, nje + integer :: this_pe + character*255 :: message + + ! On the tropical channel configuration, tile 6 numbering starts at 0,0 off the coast of Spain + ! delta_i_c = +1 is westward + ! delta_i_c = -1 is eastward + ! + ! delta_j_c = +1 is southward + ! delta_j_c = -1 is northward + + this_pe = mpp_pe() + n = mygrid ! 
Public variable from atmosphere.F90 + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + + if ( Moving_nest(n)%mn_flag%vortex_tracker .eq. 0 .or. Atm(n)%grid_number .eq. 1) then + ! No need to move + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + else if ( Moving_nest(n)%mn_flag%vortex_tracker .eq. 1 ) then + ! Prescribed move according to ntrack, move_cd_x and move_cd_y + ! Move every ntrack of dt_atmos time step + if ( mod(a_step,Moving_nest(n)%mn_flag%ntrack) .eq. 0) then + do_move = .true. + delta_i_c = Moving_nest(n)%mn_flag%move_cd_x + delta_j_c = Moving_nest(n)%mn_flag%move_cd_y + endif + else if ( Moving_nest(n)%mn_flag%vortex_tracker .eq. 2 .or. & + Moving_nest(n)%mn_flag%vortex_tracker .eq. 6 .or. & + Moving_nest(n)%mn_flag%vortex_tracker .eq. 7 ) then + ! Automatic moving following the internal storm tracker + if ( mod(a_step,Moving_nest(n)%mn_flag%ntrack) .eq. 0) then + if(Tracker(n)%tracker_gave_up) then + call mpp_error(NOTE,'Not moving: tracker decided the storm dissapated') + return + endif + if(.not.Tracker(n)%tracker_havefix) then + call mpp_error(NOTE,'Not moving: tracker did not find a storm') + return + endif + ! Calcuate domain center indexes + cx=(Atm(n)%npx-1)/2+1 + cy=(Atm(n)%npy-1)/2+1 + ! Calculate distance in parent grid index space between storm + ! center and domain center + ! Consider using xydiff as integers in the future? + xdiff=(Tracker(n)%tracker_ifix-real(cx))/Atm(n)%neststruct%refinement + ydiff=(Tracker(n)%tracker_jfix-real(cy))/Atm(n)%neststruct%refinement + if(xdiff .ge. 1.0) then + Moving_nest(n)%mn_flag%move_cd_x=1 + else if(xdiff .le. -1.0) then + Moving_nest(n)%mn_flag%move_cd_x=-1 + else + Moving_nest(n)%mn_flag%move_cd_x=0 + endif + if(ydiff .ge. 1.0) then + Moving_nest(n)%mn_flag%move_cd_y=1 + else if(ydiff .le. -1.0) then + Moving_nest(n)%mn_flag%move_cd_y=-1 + else + Moving_nest(n)%mn_flag%move_cd_y=0 + endif + if(abs(Moving_nest(n)%mn_flag%move_cd_x)>0 .or. abs(Moving_nest(n)%mn_flag%move_cd_y)>0) then + call mpp_error(NOTE,'Moving: tracker center shifted from nest center') + do_move = .true. + delta_i_c = Moving_nest(n)%mn_flag%move_cd_x + delta_j_c = Moving_nest(n)%mn_flag%move_cd_y + else + call mpp_error(NOTE,'Not moving: tracker center is near nest center') + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + endif + endif + else + write(message,*) 'Wrong vortex_tracker option: ', Moving_nest(n)%mn_flag%vortex_tracker + call mpp_error(FATAL,message) + endif + + ! Override to prevent move on first timestep + if (a_step .eq. 0) then + do_move = .false. + delta_i_c = 0 + delta_j_c = 0 + endif + + ! Check whether or not the nest move is permitted + if (n==child_grid_num) then + ! Figure out the bounds of the cube face + + ! x parent bounds: 1 to Atm(parent_grid_num)%flagstruct%npx + ! y parent bounds: 1 to Atm(parent_grid_num)%flagstruct%npy + + ! Figure out the bounds of the nest + + ! x nest bounds: 1 to Atm(child_grid_num)%flagstruct%npx + ! y nest bounds: 1 to Atm(child_grid_num)%flagstruct%npy + + ! Nest refinement: Atm(child_grid_num)%neststruct%refinement + ! Nest starting cell in x direction: Atm(child_grid_num)%neststruct%ioffset + ! 
Nest starting cell in y direction: Atm(child_grid_num)%neststruct%joffset + + nest_i_c = ( Atm(child_grid_num)%flagstruct%npx - 1 ) / Atm(child_grid_num)%neststruct%refinement + nest_j_c = ( Atm(child_grid_num)%flagstruct%npy - 1 ) / Atm(child_grid_num)%neststruct%refinement + + nis = Atm(child_grid_num)%neststruct%ioffset + delta_i_c + nie = Atm(child_grid_num)%neststruct%ioffset + nest_i_c + delta_i_c + + njs = Atm(child_grid_num)%neststruct%joffset + delta_j_c + nje = Atm(child_grid_num)%neststruct%joffset + nest_j_c + delta_j_c + + ! Will the nest motion push the nest over one of the edges? + ! Handle each direction individually, so that nest could slide along edge + + ! Causes a crash if we use .le. 1 + if (nis .le. Moving_nest(child_grid_num)%mn_flag%corral_x) then + delta_i_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in x direction blocked. small nis: ', nis + call mpp_error(WARNING,message) + endif + if (njs .le. Moving_nest(child_grid_num)%mn_flag%corral_y) then + delta_j_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in y direction blocked. small njs: ', njs + call mpp_error(WARNING,message) + endif + + if (nie .ge. Atm(parent_grid_num)%flagstruct%npx - Moving_nest(child_grid_num)%mn_flag%corral_x) then + delta_i_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in x direction blocked. large nie: ', nie + call mpp_error(WARNING,message) + endif + if (nje .ge. Atm(parent_grid_num)%flagstruct%npy - Moving_nest(child_grid_num)%mn_flag%corral_y) then + delta_j_c = 0 + ! block_moves = .true. + write(message,*) 'eval_move_nest motion in y direction blocked. large nje: ', nje + call mpp_error(WARNING,message) + endif + + if (delta_i_c .eq. 0 .and. delta_j_c .eq. 0) then + do_move = .false. + endif + + endif + + write(message, *) 'eval_move_nest: move_cd_x=', delta_i_c, 'move_cd_y=', delta_j_c, 'do_move=', do_move + call mpp_error(NOTE,message) + + end subroutine eval_move_nest + + !>@brief The subroutine 'fv_moving_nest_exec' performs the nest move - most work occurs on nest PEs but some on parent PEs. + !>@details This subroutine shifts the prognostic and physics/surface variables. + !! It also updates metadata and interpolation weights. + subroutine fv_moving_nest_exec(Atm, Atm_block, IPD_control, IPD_data, delta_i_c, delta_j_c, n, nest_num, parent_grid_num, child_grid_num, dt_atmos) + implicit none + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Atmospheric variables + type(block_control_type), intent(in) :: Atm_block !< Physics block + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion increments + integer, intent(in) :: n, nest_num !< Nest indices + integer, intent(in) :: parent_grid_num, child_grid_num !< Grid numbers + real, intent(in) :: dt_atmos !< Timestep in seconds + + !---- Moving Nest local variables ----- + integer :: this_pe + integer, pointer :: ioffset, joffset + real, pointer, dimension(:,:,:) :: grid, agrid + type(domain2d), pointer :: domain_coarse, domain_fine + real(kind=R_GRID), pointer, dimension(:,:,:,:) :: grid_global + + ! Constants for mpp calls + integer :: position = CENTER + integer :: position_u = NORTH + integer :: position_v = EAST + logical :: do_move = .True. + integer :: x_refine, y_refine ! Currently equal, but allows for future flexibility + logical :: is_fine_pe + + ! 
TODO read halo size from the namelist instead to allow nest refinement > 3 + integer :: ehalo = 3 + integer :: whalo = 3 + integer :: nhalo = 3 + integer :: shalo = 3 + integer :: extra_halo = 0 ! Extra halo for moving nest routines + + integer :: istart_fine, iend_fine, jstart_fine, jend_fine + integer :: istart_coarse, iend_coarse, jstart_coarse, jend_coarse + integer :: nx, ny, nz, nx_cubic, ny_cubic + integer :: p_istart_fine, p_iend_fine, p_jstart_fine, p_jend_fine + + ! Parent tile data, saved between timesteps + logical, save :: first_nest_move = .true. + type(grid_geometry), save :: parent_geo + type(grid_geometry), save :: fp_super_tile_geo + type(mn_surface_grids), save :: mn_static + real(kind=R_GRID), allocatable, save :: p_grid(:,:,:) + real(kind=R_GRID), allocatable, save :: p_grid_u(:,:,:) + real(kind=R_GRID), allocatable, save :: p_grid_v(:,:,:) + + type(grid_geometry) :: tile_geo, tile_geo_u, tile_geo_v + real(kind=R_GRID), allocatable :: n_grid(:,:,:) + real(kind=R_GRID), allocatable :: n_grid_u(:,:,:) + real(kind=R_GRID), allocatable :: n_grid_v(:,:,:) + real, allocatable :: wt_h(:,:,:) ! TODO verify that these are deallocated + real, allocatable :: wt_u(:,:,:) + real, allocatable :: wt_v(:,:,:) + !real :: ua(isd:ied,jsd:jed) + !real :: va(isd:ied,jsd:jed) + + logical :: filtered_terrain = .True. ! TODO set this from namelist + integer :: i, j, x, y, z, p, nn, n_moist + integer :: parent_tile + logical :: found_nest_domain = .false. + + ! Variables to enable debugging use of mpp_sync + logical :: debug_sync = .false. + integer, allocatable :: full_pelist(:) + integer :: pp, p1, p2 + + ! Variables for parent side of setup_aligned_nest() + integer :: isg, ieg, jsg, jeg, gid + integer :: isc_p, iec_p, jsc_p, jec_p + integer :: upoff, jind + integer :: ng, refinement + integer :: npx, npy, npz, ncnst, pnats + integer :: isc, iec, jsc, jec + integer :: isd, ied, jsd, jed + integer :: nq ! number of transported tracers + integer :: is, ie, js, je, k ! For recalculation of omga + integer, save :: output_step = 0 + integer, allocatable :: pelist(:) + character(len=16) :: errstring + logical :: is_moving_nest !! TODO Refine this per Atm(n) structure to allow some static and some moving nests in same run + integer :: year, month, day, hour, minute, second + real(kind=R_GRID) :: pi = 4 * atan(1.0d0) + real :: rad2deg + logical :: use_timers + + rad2deg = 180.0 / pi + + gid = mpp_pe() + this_pe = mpp_pe() + + use_timers = Atm(n)%flagstruct%fv_timers + + allocate(pelist(mpp_npes())) + call mpp_get_current_pelist(pelist) + + ! Get month to use for reading static datasets + call get_date(Atm(n)%Time_init, year, month, day, hour, minute, second) + + ! mygrid and n are the same in atmosphere.F90 + npx = Atm(n)%npx + npy = Atm(n)%npy + npz = Atm(n)%npz + ncnst = Atm(n)%ncnst + pnats = Atm(n)%flagstruct%pnats + + isc = Atm(n)%bd%isc + iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc + jec = Atm(n)%bd%jec + + isd = isc - Atm(n)%bd%ng + ied = iec + Atm(n)%bd%ng + jsd = jsc - Atm(n)%bd%ng + jed = jec + Atm(n)%bd%ng + + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + nq = ncnst-pnats + + is_fine_pe = Atm(n)%neststruct%nested .and. ANY(Atm(n)%pelist(:) == this_pe) + + + if (first_nest_move) then + + call fv_moving_nest_init_clocks(Atm(n)%flagstruct%fv_timers) + + ! If NSST is turned off, do not move the NSST variables. + ! 
Namelist switches are confusing; this should be the correct way to distinguish, not using nst_anl + if (IPD_Control%nstf_name(1) == 0) then + move_nsst=.false. + else + move_nsst=.true. + endif + + ! This will only allocate the mn_prog and mn_phys for the active Atm(n), not all of them + ! The others can safely remain unallocated. + + call allocate_fv_moving_nest_prog_type(isd, ied, jsd, jed, npz, Moving_nest(n)%mn_prog) + call allocate_fv_moving_nest_physics_type(isd, ied, jsd, jed, npz, move_physics, move_nsst, & + IPD_Control%lsoil, IPD_Control%nmtvr, IPD_Control%levs, IPD_Control%ntot2d, IPD_Control%ntot3d, & + Moving_nest(n)%mn_phys) + + endif + + !================================================================================================== + ! + ! Begin moving nest code + ! W. Ramstrom - AOML/HRD/CIMAS 01/15/2021 + ! + !================================================================================================== + + !!================================================================ + !! Step 1 -- Initialization + !!================================================================ + + domain_fine => Atm(child_grid_num)%domain + parent_tile = Atm(child_grid_num)%neststruct%parent_tile + domain_coarse => Atm(parent_grid_num)%domain + is_moving_nest = Moving_nest(child_grid_num)%mn_flag%is_moving_nest + nz = Atm(n)%npz + + if (is_moving_nest .and. do_move) then + call mpp_clock_begin (id_movnestTot) + if (use_timers) call mpp_clock_begin (id_movnest1) + + !!================================================================ + !! Step 1.1 -- Show the nest grids - (now removed) + !!================================================================ + + + !!================================================================ + !! Step 1.2 -- Configure local variables + !!================================================================ + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + ioffset => Atm(child_grid_num)%neststruct%ioffset + joffset => Atm(child_grid_num)%neststruct%joffset + + istart_fine = global_nest_domain%istart_fine(nest_num) + iend_fine = global_nest_domain%iend_fine(nest_num) + jstart_fine = global_nest_domain%jstart_fine(nest_num) + jend_fine = global_nest_domain%jend_fine(nest_num) + + istart_coarse = global_nest_domain%istart_coarse(nest_num) + iend_coarse = global_nest_domain%iend_coarse(nest_num) + jstart_coarse = global_nest_domain%jstart_coarse(nest_num) + jend_coarse = global_nest_domain%jend_coarse(nest_num) + + ! Allocate the local weight arrays. TODO OPTIMIZE change to use the ones from the gridstruct + if (is_fine_pe) then + allocate(wt_h(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 4)) + wt_h = real_snan + + allocate(wt_u(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed+1, 4)) + wt_u = real_snan + + allocate(wt_v(Atm(child_grid_num)%bd%isd:Atm(child_grid_num)%bd%ied+1, Atm(child_grid_num)%bd%jsd:Atm(child_grid_num)%bd%jed, 4)) + wt_v = real_snan + else + allocate(wt_h(1,1,4)) + wt_h = 0.0 + + allocate(wt_u(1,1,4)) + wt_u = 0.0 + + allocate(wt_v(1,1,4)) + wt_v = 0.0 + endif + + ! This full list of PEs is used for the mpp_sync for debugging. Can later be removed. + p1 = size(Atm(1)%pelist) ! Parent PEs + p2 = size(Atm(2)%pelist) ! 
Nest PEs + + allocate(full_pelist(p1 + p2)) + do pp=1,p1 + full_pelist(pp) = Atm(1)%pelist(pp) + enddo + do pp=1,p2 + full_pelist(p1+pp) = Atm(2)%pelist(pp) + enddo + + !!============================================================================ + !! Step 1.3 -- Dump the prognostic variables before we do the nest motion. + !!============================================================================ + + output_step = output_step + 1 + + !!============================================================================ + !! Step 1.4 -- Read in the full panel grid definition + !!============================================================================ + + if (is_fine_pe) then + + nx_cubic = Atm(1)%npx - 1 + ny_cubic = Atm(1)%npy - 1 + + nx = Atm(n)%npx - 1 + ny = Atm(n)%npy - 1 + + grid => Atm(n)%gridstruct%grid + agrid => Atm(n)%gridstruct%agrid + + ! Read in static lat/lon data for parent at nest resolution; returns fp_ full panel variables + ! Also read in other static variables from the orography and surface files + + if (first_nest_move) then + + ! TODO set pelist for the correct nest instead of hard-coded Atm(2)%pelist to allow multiple moving nests + + call mn_latlon_read_hires_parent(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, fp_super_tile_geo, & + Moving_nest(child_grid_num)%mn_flag%surface_dir, parent_tile) + + call mn_orog_read_hires_parent(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, & + Moving_nest(child_grid_num)%mn_flag%surface_dir, filtered_terrain, & + mn_static%orog_grid, mn_static%orog_std_grid, mn_static%ls_mask_grid, mn_static%land_frac_grid, parent_tile) + + ! If terrain_smoother method 1 is chosen, we need the parent coarse terrain + if (Moving_nest(n)%mn_flag%terrain_smoother .eq. 1) then + if (filtered_terrain) then + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, 1, Atm(2)%pelist, Moving_nest(child_grid_num)%mn_flag%surface_dir, "oro_data", "orog_filt", mn_static%parent_orog_grid, parent_tile) + else + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, 1, Atm(2)%pelist, Moving_nest(child_grid_num)%mn_flag%surface_dir, "oro_data", "orog_raw", mn_static%parent_orog_grid, parent_tile) + endif + endif + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "substrate_temperature", "substrate_temperature", mn_static%deep_soil_temp_grid, parent_tile) + ! set any -999s to +4C + call mn_replace_low_values(mn_static%deep_soil_temp_grid, -100.0, 277.0) + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "soil_type", "soil_type", mn_static%soil_type_grid, parent_tile) + ! To match initialization behavior, set any -999s to 0 in soil_type + call mn_replace_low_values(mn_static%soil_type_grid, -100.0, 0.0) + + + !! TODO investigate reading high-resolution veg_frac and veg_greenness + !call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "", mn_static%veg_frac_grid) + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "vegetation_type", "vegetation_type", mn_static%veg_type_grid, parent_tile) + ! 
To match initialization behavior, set any -999s to 0 in veg_type + call mn_replace_low_values(mn_static%veg_type_grid, -100.0, 0.0) + + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "slope_type", "slope_type", mn_static%slope_type_grid, parent_tile) + ! To match initialization behavior, set any -999s to 0 in slope_type + call mn_replace_low_values(mn_static%slope_type_grid, -100.0, 0.0) + + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "maximum_snow_albedo", "maximum_snow_albedo", mn_static%max_snow_alb_grid, parent_tile) + ! Set any -999s to 0.5 + call mn_replace_low_values(mn_static%max_snow_alb_grid, -100.0, 0.5) + + ! Albedo fraction -- read and calculate + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "facsf", "facsf", mn_static%facsf_grid, parent_tile) + + allocate(mn_static%facwf_grid(lbound(mn_static%facsf_grid,1):ubound(mn_static%facsf_grid,1),lbound(mn_static%facsf_grid,2):ubound(mn_static%facsf_grid,2))) + + ! For land points, set facwf = 1.0 - facsf + ! To match initialization behavior, set any -999s to 0 + do i=lbound(mn_static%facsf_grid,1),ubound(mn_static%facsf_grid,1) + do j=lbound(mn_static%facsf_grid,2),ubound(mn_static%facsf_grid,2) + if (mn_static%facsf_grid(i,j) .lt. -100) then + mn_static%facsf_grid(i,j) = 0 + mn_static%facwf_grid(i,j) = 0 + else + mn_static%facwf_grid(i,j) = 1.0 - mn_static%facsf_grid(i,j) + endif + enddo + enddo + + ! Additional albedo variables + ! black sky = strong cosz -- direct sunlight + ! white sky = weak cosz -- diffuse light + + ! alvsf = visible strong cosz = visible_black_sky_albedo + ! alvwf = visible weak cosz = visible_white_sky_albedo + ! alnsf = near IR strong cosz = near_IR_black_sky_albedo + ! alnwf = near IR weak cosz = near_IR_white_sky_albedo + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "visible_black_sky_albedo", mn_static%alvsf_grid, parent_tile, time=month) + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "visible_white_sky_albedo", mn_static%alvwf_grid, parent_tile, time=month) + + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "near_IR_black_sky_albedo", mn_static%alnsf_grid, parent_tile, time=month) + call mn_static_read_hires(Atm(1)%npx, Atm(1)%npy, x_refine, Atm(2)%pelist, trim(Moving_nest(child_grid_num)%mn_flag%surface_dir), "snowfree_albedo", "near_IR_white_sky_albedo", mn_static%alnwf_grid, parent_tile, time=month) + + ! Set the -999s to small value of 0.06, matching initialization code in chgres + + call mn_replace_low_values(mn_static%alvsf_grid, -100.0, 0.06) + call mn_replace_low_values(mn_static%alvwf_grid, -100.0, 0.06) + call mn_replace_low_values(mn_static%alnsf_grid, -100.0, 0.06) + call mn_replace_low_values(mn_static%alnwf_grid, -100.0, 0.06) + + endif + + endif + + if (first_nest_move) first_nest_move = .false. + + if (use_timers) call mpp_clock_end (id_movnest1) + if (use_timers) call mpp_clock_begin (id_movnest1_9) + + !!===================================================================================== + !! 
Step 1.9 -- Allocate and fill the temporary variable(s) + !!===================================================================================== + + call mn_prog_fill_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + call mn_phys_fill_temp_variables(Atm, Atm_block, IPD_control, IPD_data, n, child_grid_num, is_fine_pe, npz) + + if (use_timers) call mpp_clock_end (id_movnest1_9) + if (use_timers) call mpp_clock_begin (id_movnest2) + + !!============================================================================ + !! Step 2 -- Fill in the halos from the coarse grids + !!============================================================================ + + ! The halos seem to be empty at least on the first model timestep. + ! These calls need to be executed by the parent and nest PEs in order to do the communication + ! This is before any nest motion has occurred + + call mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + call mn_phys_fill_nest_halos_from_parent(Atm, IPD_control, IPD_data, mn_static, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + + if (use_timers) call mpp_clock_end (id_movnest2) + if (use_timers) call mpp_clock_begin (id_movnest3) + + !!============================================================================ + !! Step 3 -- Redefine the nest domain to new location + !! This calls mpp_define_nest_domains. Following the code in fv_control.F90, only should + !! be executed on the nest PEs. Operates only on indices. + !! -- Similar to med_nest_configure() from HWRF + !!============================================================================ + + call mn_meta_move_nest(delta_i_c, delta_j_c, pelist, is_fine_pe, extra_halo, & + global_nest_domain, domain_fine, domain_coarse, & + istart_coarse, iend_coarse, jstart_coarse, jend_coarse, & + istart_fine, iend_fine, jstart_fine, jend_fine) + + ! This code updates the values in neststruct; ioffset/joffset are pointers: ioffset => Atm(child_grid_num)%neststruct%ioffset + ioffset = ioffset + delta_i_c + joffset = joffset + delta_j_c + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + if (use_timers) call mpp_clock_end (id_movnest3) + if (use_timers) call mpp_clock_begin (id_movnest4) + + !!============================================================================ + !! Step 4 -- Fill the internal nest halos for the prognostic variables, + !! then physics variables + !! Only acts on the nest PEs + !! -- similar to med_nest_initial + !!============================================================================ + + ! TODO should/can this run before the mn_meta_move_nest? + if (is_fine_pe) then + call mn_prog_fill_intern_nest_halos(Atm(n), domain_fine, is_fine_pe) + call mn_phys_fill_intern_nest_halos(Moving_nest(n), IPD_control, IPD_data, domain_fine, is_fine_pe) + endif + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + if (use_timers) call mpp_clock_end (id_movnest4) + if (use_timers) call mpp_clock_begin (id_movnest5) + + !!============================================================================ + !! Step 5 -- Recalculate nest halo weights (for fine PEs only) and indices + !! -- Similiar to med_nest_weights + !!============================================================================ + + if (is_fine_pe) then + !!============================================================================ + !! 
Step 5.1 -- Fill the p_grid* and n_grid* variables + !!============================================================================ + if (use_timers) call mpp_clock_begin (id_movnest5_1) + + ! parent_geo, p_grid, p_grid_u, and p_grid_v are only loaded first time; afterwards they are reused. + ! Because they are the coarse resolution grids (supergrid, a-grid, u stagger, v stagger) for the parent + call mn_latlon_load_parent(Moving_nest(child_grid_num)%mn_flag%surface_dir, Atm, n, parent_tile, & + delta_i_c, delta_j_c, Atm(2)%pelist, child_grid_num, & + parent_geo, tile_geo, tile_geo_u, tile_geo_v, fp_super_tile_geo, & + p_grid, n_grid, p_grid_u, n_grid_u, p_grid_v, n_grid_v) + + if (use_timers) call mpp_clock_end (id_movnest5_1) + if (use_timers) call mpp_clock_begin (id_movnest5_2) + + ! tile_geo holds the center lat/lons for the entire nest (all PEs). + call mn_reset_phys_latlon(Atm, n, tile_geo, fp_super_tile_geo, Atm_block, IPD_control, IPD_data) + + if (use_timers) call mpp_clock_end (id_movnest5_2) + if (use_timers) call mpp_clock_begin (id_movnest5_3) + + !!============================================================================ + !! Step 5.2 -- Fill the wt* variables for each stagger + !!============================================================================ + + call mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo, parent_geo, fp_super_tile_geo, & + is_fine_pe, global_nest_domain, position, p_grid, n_grid, wt_h, istart_coarse, jstart_coarse) + + call mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo_u, parent_geo, fp_super_tile_geo, & + is_fine_pe, global_nest_domain, position_u, p_grid_u, n_grid_u, wt_u, istart_coarse, jstart_coarse) + + call mn_meta_recalc( delta_i_c, delta_j_c, x_refine, y_refine, tile_geo_v, parent_geo, fp_super_tile_geo, & + is_fine_pe, global_nest_domain, position_v, p_grid_v, n_grid_v, wt_v, istart_coarse, jstart_coarse) + + if (use_timers) call mpp_clock_end (id_movnest5_3) + endif + + if (use_timers) call mpp_clock_begin (id_movnest5_4) + + !!============================================================================ + !! Step 5.3 -- Adjust the indices by the values of delta_i_c, delta_j_c + !!============================================================================ + + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_h) + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_u) + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_v) + call mn_shift_index(delta_i_c, delta_j_c, Atm(child_grid_num)%neststruct%ind_b) + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + if (use_timers) call mpp_clock_end (id_movnest5_4) + + if (use_timers) call mpp_clock_end (id_movnest5) + if (use_timers) call mpp_clock_begin (id_movnest6) + + !!============================================================================ + !! Step 6 Shift the data on each nest PE + !! -- similar to med_nest_move in HWRF + !!============================================================================ + + call mn_prog_shift_data(Atm, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, & + is_fine_pe, global_nest_domain, nz) + + call mn_phys_shift_data(Atm, IPD_control, IPD_data, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, & + is_fine_pe, global_nest_domain, nz) + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. 
+ + if (use_timers) call mpp_clock_end (id_movnest6) + if (use_timers) call mpp_clock_begin (id_movnest7_0) + + !!===================================================================================== + !! Step 7 -- Reset the grid definition data and buffer sizes and weights after the nest motion + !! Mostly needed when dynamics is executed + !!===================================================================================== + + call mn_meta_reset_gridstruct(Atm, n, child_grid_num, global_nest_domain, fp_super_tile_geo, x_refine, y_refine, is_fine_pe, wt_h, wt_u, wt_v, a_step, dt_atmos) + + if (use_timers) call mpp_clock_end (id_movnest7_0) + if (use_timers) call mpp_clock_begin (id_movnest7_1) + + !!===================================================================================== + !! Step 7.01 -- Reset the orography data that was read from the hires static file + !! + !!===================================================================================== + + if (is_fine_pe) then + ! phis is allocated in fv_arrays.F90 as: allocate ( Atm%phis(isd:ied ,jsd:jed ) ) + ! 0 -- all high-resolution data, 1 - static nest smoothing algorithm, 5 - 5 point smoother, 9 - 9 point smoother + ! Defaults to 1 - static nest smoothing algorithm; this seems to produce the most stable solutions + + select case(Moving_nest(n)%mn_flag%terrain_smoother) + case (0) + ! High-resolution terrain for entire nest + Atm(n)%phis(isd:ied, jsd:jed) = mn_static%orog_grid((ioffset-1)*x_refine+isd:(ioffset-1)*x_refine+ied, (joffset-1)*y_refine+jsd:(joffset-1)*y_refine+jed) * grav + case (1) + ! Static nest smoothing algorithm - interpolation of coarse terrain in halo zone and 5 point blending zone of coarse and fine data + call set_blended_terrain(Atm(n), mn_static%parent_orog_grid, mn_static%orog_grid, x_refine, Atm(n)%bd%ng, 5, a_step) + case (2) + ! Static nest smoothing algorithm - interpolation of coarse terrain in halo zone and 5 point blending zone of coarse and fine data + call set_blended_terrain(Atm(n), mn_static%parent_orog_grid, mn_static%orog_grid, x_refine, Atm(n)%bd%ng, 10, a_step) + case (5) + ! 5 pt smoother. blend zone of 5 to match static nest + call set_smooth_nest_terrain(Atm(n), mn_static%orog_grid, x_refine, 5, Atm(n)%bd%ng, 5) + case (9) + ! 9 pt smoother. blend zone of 5 to match static nest + call set_smooth_nest_terrain(Atm(n), mn_static%orog_grid, x_refine, 9, Atm(n)%bd%ng, 5) + case default + write (errstring, "(I0)") Moving_nest(n)%mn_flag%terrain_smoother + call mpp_error(FATAL,'Invalid terrain_smoother in fv_moving_nest_main '//errstring) + end select + + ! Reinitialize diagnostics -- zsurf which is g * Atm%phis + call fv_diag_reinit(Atm(n:n)) + + ! sgh and oro were only fully allocated if fv_land is True + ! if false, oro is (1,1), and sgh is not allocated + if ( Atm(n)%flagstruct%fv_land ) then + ! 
oro and sgh are allocated only for the compute domain -- they do not have halos + + !fv_arrays.F90 oro() !< land fraction (1: all land; 0: all water) + !real, _ALLOCATABLE :: oro(:,:) _NULL !< land fraction (1: all land; 0: all water) + !real, _ALLOCATABLE :: sgh(:,:) _NULL !< Terrain standard deviation + + Atm(n)%oro(isc:iec, jsc:jec) = mn_static%land_frac_grid((ioffset-1)*x_refine+isc:(ioffset-1)*x_refine+iec, (joffset-1)*y_refine+jsc:(joffset-1)*y_refine+jec) + Atm(n)%sgh(isc:iec, jsc:jec) = mn_static%orog_std_grid((ioffset-1)*x_refine+isc:(ioffset-1)*x_refine+iec, (joffset-1)*y_refine+jsc:(joffset-1)*y_refine+jec) + endif + + call mn_phys_reset_sfc_props(Atm, n, mn_static, Atm_block, IPD_data, ioffset, joffset, x_refine) + endif + + !!===================================================================================== + !! Step 7.1 Refill the nest edge halos from parent grid after nest motion + !! Parent and nest PEs need to execute these subroutines + !!===================================================================================== + + ! Refill the halos around the edge of the nest from the parent + call mn_prog_fill_nest_halos_from_parent(Atm, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + call mn_phys_fill_nest_halos_from_parent(Atm, IPD_control, IPD_data, mn_static, n, child_grid_num, is_fine_pe, global_nest_domain, nz) + + if (use_timers) call mpp_clock_end (id_movnest7_1) + + if (is_fine_pe) then + if (use_timers) call mpp_clock_begin (id_movnest7_2) + + ! Refill the internal halos after nest motion + call mn_prog_fill_intern_nest_halos(Atm(n), domain_fine, is_fine_pe) + call mn_phys_fill_intern_nest_halos(Moving_nest(n), IPD_control, IPD_data, domain_fine, is_fine_pe) + + if (use_timers) call mpp_clock_end (id_movnest7_2) + endif + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + !!===================================================================================== + !! Step 7.3 -- Apply the temporary variable to the prognostics and physics structures + !!===================================================================================== + if (use_timers) call mpp_clock_begin (id_movnest7_3) + + call mn_prog_apply_temp_variables(Atm, n, child_grid_num, is_fine_pe, npz) + call mn_phys_apply_temp_variables(Atm, Atm_block, IPD_control, IPD_data, n, child_grid_num, is_fine_pe, npz) + + if (use_timers) call mpp_clock_end (id_movnest7_3) + if (use_timers) call mpp_clock_begin (id_movnest8) + + !!============================================================================ + !! Step 8 -- Dump to netCDF + !!============================================================================ + + + if (is_fine_pe) then + do i=isc,iec + do j=jsc,jec + ! EMIS PATCH - Force to positive at all locations matching the landmask + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%emis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_lnd(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 2 .and. Moving_nest(n)%mn_phys%emis_ice(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_ice(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 0 .and. Moving_nest(n)%mn_phys%emis_wat(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_wat(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) .lt. 
0.0) Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) = 0.5 + !if (Moving_nest(n)%mn_phys%slmsk(i,j) .eq. 1 .and. Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) = 0.5 + + ! EMIS PATCH - Force to positive at all locations. + if (Moving_nest(n)%mn_phys%emis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_lnd(i,j) = 0.5 + if (Moving_nest(n)%mn_phys%emis_ice(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_ice(i,j) = 0.5 + if (Moving_nest(n)%mn_phys%emis_wat(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%emis_wat(i,j) = 0.5 + if (Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirvis_lnd(i,j) = 0.5 + if (Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdirnir_lnd(i,j) = 0.5 + if (Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifvis_lnd(i,j) = 0.5 + if (Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) .lt. 0.0) Moving_nest(n)%mn_phys%albdifnir_lnd(i,j) = 0.5 + + enddo + enddo + endif + + output_step = output_step + 1 + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + if (use_timers) call mpp_clock_end (id_movnest8) + if (use_timers) call mpp_clock_begin (id_movnest9) + + !!========================================================================================= + !! Step 9 -- Recalculate auxiliary pressures + !! Should help stabilize the fields before dynamics runs + !! TODO Consider whether vertical remapping, recalculation of omega, interpolation of winds + !! to A or C grids, and/or divergence recalculation are needed here. + !!========================================================================================= + + if (is_fine_pe) then + call recalc_aux_pressures(Atm(n)) + endif + + output_step = output_step + 1 + endif + + if (use_timers) call mpp_clock_end (id_movnest9) + call mpp_clock_end (id_movnestTot) + + if (debug_sync) call mpp_sync(full_pelist) ! Used to make debugging easier. Can be removed. + + !call compare_terrain("phis", Atm(n)%phis, 1, Atm(n)%neststruct%ind_h, x_refine, y_refine, is_fine_pe, global_nest_domain) + + !deallocate(tile_geo%lats, tile_geo%lons) + !deallocate(tile_geo_u%lats, tile_geo_u%lons) + !deallocate(tile_geo_v%lats, tile_geo_v%lons) + + !deallocate(p_grid, n_grid) + !deallocate(p_grid_u, n_grid_u) + !deallocate(p_grid_v, n_grid_v) + + end subroutine fv_moving_nest_exec + + !>@brief The subroutine 'mn_replace_low_values' replaces low values with a default value. + subroutine mn_replace_low_values(data_grid, low_value, new_value) + real, _ALLOCATABLE, intent(inout) :: data_grid(:,:) !< 2D grid of data + real, intent(in) :: low_value !< Low value to check for; e.g. negative or fill value + real, intent(in) :: new_value !< Value to replace low value with + + integer :: i, j + + do i=lbound(data_grid,1),ubound(data_grid,1) + do j=lbound(data_grid,2),ubound(data_grid,2) + if (data_grid(i,j) .le.
low_value) data_grid(i,j) = new_value + enddo + enddo + end subroutine mn_replace_low_values + +end module fv_moving_nest_main_mod + diff --git a/moving_nest/fv_moving_nest_physics.F90 b/moving_nest/fv_moving_nest_physics.F90 new file mode 100644 index 000000000..873964fa5 --- /dev/null +++ b/moving_nest/fv_moving_nest_physics.F90 @@ -0,0 +1,1436 @@ +!*********************************************************************** +!* GNU General Public License * +!* This file is a part of fvGFS. * +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides Moving Nest functionality for physics and surface variables +!! @author W. Ramstrom. Collaboration with Bin Liu and Chunxi Zhang, EMC +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + + +! =======================================================================! +! +! Notes +! +!------------------------------------------------------------------------ +! Moving Nest Subroutine Naming Convention +!----------------------------------------------------------------------- +! +! mn_meta_* subroutines perform moving nest operations for FV3 metadata. +! These routines will run only once per nest move. +! +! mn_var_* subroutines perform moving nest operations for an individual FV3 variable. +! These routines will run many times per nest move. +! +! mn_prog_* subroutines perform moving nest operations for the list of prognostic fields. +! These routines will run only once per nest move. +! +! mn_phys_* subroutines perform moving nest operations for the list of physics fields. +! These routines will run only once per nest move. +! +! =======================================================================! 
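! -----------------------------------------------------------------------
! [Editor's illustrative sketch -- not part of the patch.] The layering
! described above usually pairs a once-per-move mn_phys_* driver with a
! per-variable mn_var_* helper: the driver walks the list of physics
! fields and calls the helper once for each array. The helper and field
! names used below (mn_var_fill_intern_nest_halos, mn_phys%ts, mn_phys%smc)
! appear later in this file; the wrapper subroutine name is hypothetical
! and is shown only to make the convention concrete.
!
!   subroutine mn_phys_example_driver(mn_phys, domain_fine, is_fine_pe)
!     type(fv_moving_nest_physics_type), intent(inout) :: mn_phys     ! physics fields for one nest
!     type(domain2d), intent(inout)                    :: domain_fine ! FMS domain of the nest
!     logical, intent(in)                              :: is_fine_pe  ! true only on nest PEs
!     ! One mn_var_* call per field; each call refills the internal
!     ! nest halos for a single 2D/3D array after the nest has moved.
!     call mn_var_fill_intern_nest_halos(mn_phys%ts,  domain_fine, is_fine_pe)
!     call mn_var_fill_intern_nest_halos(mn_phys%smc, domain_fine, is_fine_pe)
!   end subroutine mn_phys_example_driver
! -----------------------------------------------------------------------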
+ +module fv_moving_nest_physics_mod + + use block_control_mod, only: block_control_type + use mpp_mod, only: mpp_pe, mpp_sync, mpp_sync_self, mpp_send, mpp_error, NOTE, FATAL + use mpp_domains_mod, only: mpp_update_domains, mpp_get_data_domain, mpp_get_global_domain + use mpp_domains_mod, only: mpp_define_nest_domains, mpp_shift_nest_domains, nest_domain_type, domain2d + use mpp_domains_mod, only: mpp_get_C2F_index, mpp_update_nest_fine + use mpp_domains_mod, only: mpp_get_F2C_index, mpp_update_nest_coarse + use mpp_domains_mod, only: NORTH, SOUTH, EAST, WEST, CORNER, CENTER + use mpp_domains_mod, only: NUPDATE, SUPDATE, EUPDATE, WUPDATE, DGRID_NE + +#ifdef GFS_TYPES + use GFS_typedefs, only: IPD_data_type => GFS_data_type, & + IPD_control_type => GFS_control_type, kind_phys +#else + use IPD_typedefs, only: IPD_data_type, IPD_control_type, kind_phys => IPD_kind_phys +#endif + use GFS_init, only: GFS_grid_populate + + use boundary_mod, only: update_coarse_grid, update_coarse_grid_mpp + use constants_mod, only: cp_air, rdgas, grav, rvgas, kappa, pstd_mks, hlv + use field_manager_mod, only: MODEL_ATMOS + use fv_arrays_mod, only: fv_atmos_type, fv_nest_type, fv_grid_type, R_GRID + use fv_moving_nest_types_mod, only: fv_moving_nest_prog_type, fv_moving_nest_physics_type, mn_surface_grids, fv_moving_nest_type + use fv_arrays_mod, only: allocate_fv_nest_bc_type, deallocate_fv_nest_bc_type + use fv_grid_tools_mod, only: init_grid + use fv_grid_utils_mod, only: grid_utils_init, ptop_min, dist2side_latlon + use fv_mapz_mod, only: Lagrangian_to_Eulerian, moist_cv, compute_total_energy + use fv_nesting_mod, only: dealloc_nested_buffers + use fv_nwp_nudge_mod, only: do_adiabatic_init + use init_hydro_mod, only: p_var + use tracer_manager_mod, only: get_tracer_index, get_tracer_names + use fv_moving_nest_utils_mod, only: alloc_halo_buffer, grid_geometry, output_grid_to_nc + use fv_moving_nest_utils_mod, only: fill_nest_from_buffer, fill_nest_from_buffer_cell_center, fill_nest_from_buffer_nearest_neighbor + use fv_moving_nest_utils_mod, only: fill_nest_halos_from_parent, fill_grid_from_supergrid, fill_weight_grid + use fv_moving_nest_utils_mod, only: alloc_read_data + use fv_moving_nest_utils_mod, only: fill_nest_from_buffer_cell_center_masked + use fv_moving_nest_utils_mod, only: fill_nest_halos_from_parent_masked + + use fv_moving_nest_mod, only: mn_var_fill_intern_nest_halos, mn_var_dump_to_netcdf, mn_var_shift_data, calc_nest_alignment + use fv_moving_nest_types_mod, only: Moving_nest + implicit none + +#ifdef NO_QUAD_PRECISION + ! 64-bit precision (kind=8) + integer, parameter:: f_p = selected_real_kind(15) +#else + ! Higher precision (kind=16) for grid geometrical factors: + integer, parameter:: f_p = selected_real_kind(20) +#endif + +#ifdef OVERLOAD_R4 + real, parameter:: real_snan=x'FFBFFFFF' +#else + real, parameter:: real_snan=x'FFF7FFFFFFFFFFFF' +#endif + + logical :: debug_log = .false. + logical :: move_physics = .true. ! Always true, unless developer sets move_physics to .False. here for debugging. + logical :: move_nsst = .true. ! Value is reset in fv_moving_nest_main.F90 from namelist options + + ! Persistent variables to enable debug printing after range warnings. 
+ type (fv_atmos_type), pointer :: save_Atm_n + type (block_control_type), pointer :: save_Atm_block + type(IPD_control_type), pointer :: save_IPD_Control + type(IPD_data_type), pointer :: save_IPD_Data(:) + +#include + +contains + + !>@brief The subroutine 'mn_phys_reset_sfc_props' sets the static surface parameters from the high-resolution input file data + !>@details This subroutine relies on earlier code reading the data from files into the mn_static data structure + !! This subroutine does not yet handle ice points or frac_grid - fractional landfrac/oceanfrac values + subroutine mn_phys_reset_sfc_props(Atm, n, mn_static, Atm_block, IPD_data, ioffset, joffset, refine) + type(fv_atmos_type), intent(inout),allocatable :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n !< Current grid number + type(mn_surface_grids), intent(in) :: mn_static !< Static surface data + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + integer, intent(in) :: ioffset, joffset !< Current nest offset in i,j direction + integer, intent(in) :: refine !< Nest refinement ratio + + ! For iterating through physics/surface vector data + integer :: nb, blen, ix, i_pe, j_pe, i_idx, j_idx + real(kind=kind_phys) :: phys_oro + + ! Setup local land sea mask grid for masked interpolations + do i_pe = Atm(n)%bd%isd, Atm(n)%bd%ied + do j_pe = Atm(n)%bd%jsd, Atm(n)%bd%jed + i_idx = (ioffset-1)*refine + i_pe + j_idx = (joffset-1)*refine + j_pe + + Moving_nest(n)%mn_phys%slmsk(i_pe, j_pe) = mn_static%ls_mask_grid(i_idx, j_idx) + enddo + enddo + + ! Reset the variables from the fix_sfc files + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i_pe = Atm_block%index(nb)%ii(ix) + j_pe = Atm_block%index(nb)%jj(ix) + + i_idx = (ioffset-1)*refine + i_pe + j_idx = (joffset-1)*refine + j_pe + + ! Reset the land sea mask from the hires parent data + IPD_data(nb)%Sfcprop%slmsk(ix) = mn_static%ls_mask_grid(i_idx, j_idx) + + ! IFD values are 0 for land, and 1 for oceans/lakes -- reverse of the land sea mask + ! Land Sea Mask has values of 0 for oceans/lakes, 1 for land, 2 for sea ice + ! TODO figure out what ifd should be for sea ice + if (mn_static%ls_mask_grid(i_idx, j_idx) .eq. 1 ) then + if (move_nsst) IPD_data(nb)%Sfcprop%ifd(ix) = 0 ! Land + IPD_data(nb)%Sfcprop%oceanfrac(ix) = 0 ! Land -- TODO permit fractions + IPD_data(nb)%Sfcprop%landfrac(ix) = 1 ! Land -- TODO permit fractions + else + if (move_nsst) IPD_data(nb)%Sfcprop%ifd(ix) = 1 ! Ocean + IPD_data(nb)%Sfcprop%oceanfrac(ix) = 1 ! Ocean -- TODO permit fractions + IPD_data(nb)%Sfcprop%landfrac(ix) = 0 ! Ocean -- TODO permit fractions + endif + + IPD_data(nb)%Sfcprop%tg3(ix) = mn_static%deep_soil_temp_grid(i_idx, j_idx) + + ! Follow logic from FV3/io/FV3GFS_io.F90 line 1187 + ! TODO this will need to be more complicated if we support frac_grid + !if (nint(mn_static%soil_type_grid(i_idx, j_idx)) == 14 .or. int(mn_static%soil_type_grid(i_idx, j_idx)+0.5) <= 0) then + !if (nint(mn_static%soil_type_grid(i_idx, j_idx)) == 14 .or. + + !if ( (mn_static%ls_mask_grid(i_idx, j_idx) .eq. 1 .and. nint(mn_static%land_frac_grid(i_idx, j_idx)) == 0) .or. & + ! mn_static%soil_type_grid(i_idx, j_idx) < 0.5) then + if (mn_static%ls_mask_grid(i_idx, j_idx) .eq. 1 .and. nint(mn_static%land_frac_grid(i_idx, j_idx)) == 0 ) then + ! Water soil type == lake, etc. 
-- override the other variables and make this water + !!print '("mn_phys_reset_sfc_props LAKE SOIL npe=",I0," x,y=",I0,",",I0," lat=",F10.3," lon=",F10.3)', mpp_pe(), i_idx, j_idx, IPD_data(nb)%Grid%xlat_d(ix), IPD_data(nb)%Grid%xlon_d(ix)-360.0 + + if (move_nsst) IPD_data(nb)%Sfcprop%ifd(ix) = 1 ! Ocean + IPD_data(nb)%Sfcprop%oceanfrac(ix) = 1 ! Ocean -- TODO permit fractions + IPD_data(nb)%Sfcprop%landfrac(ix) = 0 ! Ocean -- TODO permit fractions + + IPD_data(nb)%Sfcprop%stype(ix) = 0 + IPD_data(nb)%Sfcprop%slmsk(ix) = 0 + else + IPD_data(nb)%Sfcprop%stype(ix) = nint(mn_static%soil_type_grid(i_idx, j_idx)) + endif + + !IPD_data(nb)%Sfcprop%vfrac(ix) = mn_static%veg_frac_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%vtype(ix) = nint(mn_static%veg_type_grid(i_idx, j_idx)) + IPD_data(nb)%Sfcprop%slope(ix) = nint(mn_static%slope_type_grid(i_idx, j_idx)) + IPD_data(nb)%Sfcprop%snoalb(ix) = mn_static%max_snow_alb_grid(i_idx, j_idx) + + IPD_data(nb)%Sfcprop%facsf(ix) = mn_static%facsf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%facwf(ix) = mn_static%facwf_grid(i_idx, j_idx) + + IPD_data(nb)%Sfcprop%alvsf(ix) = mn_static%alvsf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%alvwf(ix) = mn_static%alvwf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%alnsf(ix) = mn_static%alnsf_grid(i_idx, j_idx) + IPD_data(nb)%Sfcprop%alnwf(ix) = mn_static%alnwf_grid(i_idx, j_idx) + + ! Reset the orography in the physics arrays, using the smoothed values from above + phys_oro = Atm(n)%phis(i_pe, j_pe) / grav + IPD_data(nb)%Sfcprop%oro(ix) = phys_oro + IPD_data(nb)%Sfcprop%oro_uf(ix) = phys_oro + + enddo + enddo + + end subroutine mn_phys_reset_sfc_props + + !>@brief The subroutine 'mn_reset_phys_latlon' sets the lat/lons from the high-resolution input file data + !>@details This subroutine sets lat/lons of the moved nest, then recalculates all the derived quantities (dx,dy,etc.) + subroutine mn_reset_phys_latlon(Atm, n, tile_geo, fp_super_tile_geo, Atm_block, IPD_control, IPD_data) + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) !< Array of atmospheric data + integer, intent(in) :: n !< Current grid number + type(grid_geometry), intent(in) :: tile_geo !< Bounds of this grid + type(grid_geometry), intent(in) :: fp_super_tile_geo !< Bounds of high-resolution parent grid + type(block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_data(:) !< Physics variable data + + integer :: isc, jsc, iec, jec + integer :: x, y, fp_i, fp_j + integer :: nest_x, nest_y, parent_x, parent_y + integer :: this_pe + + real(kind=kind_phys), allocatable :: lats(:,:), lons(:,:), area(:,:) + + this_pe = mpp_pe() + + isc = Atm(n)%bd%isc + jsc = Atm(n)%bd%jsc + iec = Atm(n)%bd%iec + jec = Atm(n)%bd%jec + + allocate(lats(isc:iec, jsc:jec)) + allocate(lons(isc:iec, jsc:jec)) + allocate(area(isc:iec, jsc:jec)) + + call calc_nest_alignment(Atm, n, nest_x, nest_y, parent_x, parent_y) + + do x = isc, iec + do y = jsc, jec + fp_i = (x - nest_x) * 2 + parent_x + fp_j = (y - nest_y) * 2 + parent_y + + lons(x,y) = fp_super_tile_geo%lons(fp_i, fp_j) + lats(x,y) = fp_super_tile_geo%lats(fp_i, fp_j) + + ! Need to add the areas from 4 squares, because the netCDF file has areas calculated for the supergrid cells + ! We need the area of the whole center of the cell. + ! Example dimensions for C288_grid.tile6.nc + ! longitude -- x(577,577) + ! latitude -- y(577,577) + ! area -- x(576,576) + + !
Extracting lat/lon/area from Supergrid + ! + ! 1,1----2,1----3,1 + ! | | | + ! | a1,1 | a2,1 | + ! | | | + ! 1,2----2,2----3,2 + ! | | | + ! | a1,2 | a2,2 | + ! | | | + ! 1,3----2,3----3,3 + ! + ! The model A-grid cell 1,1 is centered at supergrid location 2,2 + ! The area of the A-grid cell is the sum of the 4 supergrid areas A = a(1,1) + a(1,2) + a(2,1) + a(2,2) + + area(x,y) = fp_super_tile_geo%area(fp_i - 1, fp_j - 1) + fp_super_tile_geo%area(fp_i - 1, fp_j) + & + fp_super_tile_geo%area(fp_i, fp_j - 1) + fp_super_tile_geo%area(fp_i, fp_j) ! TODO make sure these offsets are correct. + enddo + enddo + + call GFS_grid_populate(IPD_data%Grid, lons, lats, area) + + deallocate(lats) + deallocate(lons) + deallocate(area) + + end subroutine mn_reset_phys_latlon + + !>@brief The subroutine 'mn_phys_fill_temp_variables' extracts 1D physics data into a 2D array for nest motion + !>@details This subroutine fills in the mn_phys structure on the Atm object with 2D arrays of physics/surface variables. + !! Note that ice variables are not yet handled. + subroutine mn_phys_fill_temp_variables(Atm, Atm_block, IPD_Control, IPD_Data, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type (block_control_type), target, intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), target, intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), target, intent(inout) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: isd, ied, jsd, jed + integer :: is, ie, js, je + integer :: this_pe + + integer :: nb, blen, i, j, k, ix, nv + type(fv_moving_nest_physics_type), pointer :: mn_phys + + this_pe = mpp_pe() + + save_Atm_n => Atm(n) + save_Atm_block => Atm_block + save_IPD_Control => IPD_Control + save_IPD_Data => IPD_Data + + isd = Atm(n)%bd%isd + ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd + jed = Atm(n)%bd%jed + + !if (is_fine_pe) call dump_surface_physics(isd+8, jsd+8, npz-1) + + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + mn_phys => Moving_nest(n)%mn_phys + + mn_phys%ts(is:ie, js:je) = Atm(n)%ts(is:ie, js:je) + + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + ! 
Get the indices only once, before iterating through vertical levels or number of variables + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + + if (move_physics) then + do k = 1, IPD_Control%lsoil + mn_phys%smc(i,j,k) = IPD_Data(nb)%Sfcprop%smc(ix,k) + mn_phys%stc(i,j,k) = IPD_Data(nb)%Sfcprop%stc(ix,k) + mn_phys%slc(i,j,k) = IPD_Data(nb)%Sfcprop%slc(ix,k) + enddo + + mn_phys%emis_lnd(i,j) = IPD_Data(nb)%Sfcprop%emis_lnd(ix) + mn_phys%emis_ice(i,j) = IPD_Data(nb)%Sfcprop%emis_ice(ix) + mn_phys%emis_wat(i,j) = IPD_Data(nb)%Sfcprop%emis_wat(ix) + + !mn_phys%sfalb_lnd(i,j) = IPD_Data(nb)%Sfcprop%sfalb_lnd(ix) + !mn_phys%sfalb_lnd_bck(i,j) = IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) + !mn_phys%semis(i,j) = IPD_Data(nb)%Radtend%semis(ix) + !mn_phys%semisbase(i,j) = IPD_Data(nb)%Sfcprop%semisbase(ix) + !mn_phys%sfalb(i,j) = IPD_Data(nb)%Radtend%sfalb(ix) + + mn_phys%albdirvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirvis_lnd(ix) + mn_phys%albdirnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirnir_lnd(ix) + mn_phys%albdifvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifvis_lnd(ix) + mn_phys%albdifnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifnir_lnd(ix) + + mn_phys%u10m(i,j) = IPD_Data(nb)%IntDiag%u10m(ix) + mn_phys%v10m(i,j) = IPD_Data(nb)%IntDiag%v10m(ix) + mn_phys%tprcp(i,j) = IPD_Data(nb)%Sfcprop%tprcp(ix) + + do k = 1, IPD_Control%nmtvr + mn_phys%hprime(i,j,k) = IPD_Data(nb)%Sfcprop%hprime(ix,k) + enddo + + mn_phys%lakefrac(i,j) = IPD_Data(nb)%Sfcprop%lakefrac(ix) + mn_phys%lakedepth(i,j) = IPD_Data(nb)%Sfcprop%lakedepth(ix) + + mn_phys%canopy(i,j) = IPD_Data(nb)%Sfcprop%canopy(ix) + mn_phys%vegfrac(i,j)= IPD_Data(nb)%Sfcprop%vfrac(ix) + mn_phys%uustar(i,j) = IPD_Data(nb)%Sfcprop%uustar(ix) + mn_phys%shdmin(i,j) = IPD_Data(nb)%Sfcprop%shdmin(ix) + mn_phys%shdmax(i,j) = IPD_Data(nb)%Sfcprop%shdmax(ix) + mn_phys%zorl(i,j) = IPD_Data(nb)%Sfcprop%zorl(ix) + mn_phys%zorll(i,j) = IPD_Data(nb)%Sfcprop%zorll(ix) + mn_phys%zorlwav(i,j)= IPD_Data(nb)%Sfcprop%zorlwav(ix) + mn_phys%zorlw(i,j) = IPD_Data(nb)%Sfcprop%zorlw(ix) + mn_phys%tsfco(i,j) = IPD_Data(nb)%Sfcprop%tsfco(ix) + mn_phys%tsfcl(i,j) = IPD_Data(nb)%Sfcprop%tsfcl(ix) + mn_phys%tsfc(i,j) = IPD_Data(nb)%Sfcprop%tsfc(ix) + + mn_phys%albdirvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirvis_lnd(ix) + mn_phys%albdirnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdirnir_lnd(ix) + mn_phys%albdifvis_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifvis_lnd(ix) + mn_phys%albdifnir_lnd(i,j) = IPD_Data(nb)%Sfcprop%albdifnir_lnd(ix) + + do nv = 1, IPD_Control%ntot2d + mn_phys%phy_f2d(i,j,nv) = IPD_Data(nb)%Tbd%phy_f2d(ix, nv) + enddo + + do k = 1, IPD_Control%levs + do nv = 1, IPD_Control%ntot3d + mn_phys%phy_f3d(i,j,k,nv) = IPD_Data(nb)%Tbd%phy_f3d(ix, k, nv) + enddo + enddo + + ! 
Cloud prop data has x,y dimensions + mn_phys%cv(i,j) = IPD_Data(nb)%Cldprop%cv(ix) + mn_phys%cvt(i,j) = IPD_Data(nb)%Cldprop%cvt(ix) + mn_phys%cvb(i,j) = IPD_Data(nb)%Cldprop%cvb(ix) + endif + + if (move_nsst) then + mn_phys%tref(i,j) = IPD_Data(nb)%Sfcprop%tref(ix) + mn_phys%z_c(i,j) = IPD_Data(nb)%Sfcprop%z_c(ix) + mn_phys%c_0(i,j) = IPD_Data(nb)%Sfcprop%c_0(ix) + mn_phys%c_d(i,j) = IPD_Data(nb)%Sfcprop%c_d(ix) + mn_phys%w_0(i,j) = IPD_Data(nb)%Sfcprop%w_0(ix) + mn_phys%w_d(i,j) = IPD_Data(nb)%Sfcprop%w_d(ix) + mn_phys%xt(i,j) = IPD_Data(nb)%Sfcprop%xt(ix) + mn_phys%xs(i,j) = IPD_Data(nb)%Sfcprop%xs(ix) + mn_phys%xu(i,j) = IPD_Data(nb)%Sfcprop%xu(ix) + mn_phys%xv(i,j) = IPD_Data(nb)%Sfcprop%xv(ix) + mn_phys%xz(i,j) = IPD_Data(nb)%Sfcprop%xz(ix) + mn_phys%zm(i,j) = IPD_Data(nb)%Sfcprop%zm(ix) + mn_phys%xtts(i,j) = IPD_Data(nb)%Sfcprop%xtts(ix) + mn_phys%xzts(i,j) = IPD_Data(nb)%Sfcprop%xzts(ix) + mn_phys%d_conv(i,j) = IPD_Data(nb)%Sfcprop%d_conv(ix) + mn_phys%dt_cool(i,j)= IPD_Data(nb)%Sfcprop%dt_cool(ix) + mn_phys%qrain(i,j) = IPD_Data(nb)%Sfcprop%qrain(ix) + endif + enddo + enddo + + end subroutine mn_phys_fill_temp_variables + + !>@brief The subroutine 'mn_phys_apply_temp_variables' copies moved 2D data back into 1D physics arrays for nest motion + !>@details This subroutine fills the 1D physics arrays from the mn_phys structure on the Atm object + !! Note that ice variables are not yet handled. + subroutine mn_phys_apply_temp_variables(Atm, Atm_block, IPD_Control, IPD_Data, n, child_grid_num, is_fine_pe, npz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type (block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + integer, intent(in) :: npz !< Number of vertical levels + + integer :: is, ie, js, je + integer :: this_pe + integer :: nb, blen, i, j, k, ix, nv + type(fv_moving_nest_physics_type), pointer :: mn_phys + + this_pe = mpp_pe() + mn_phys => Moving_nest(n)%mn_phys + + ! Needed to fill the local grids for parent and nest PEs in order to transmit/interpolate data from parent to nest + ! But only the nest PEs have changed the values with nest motion, so they are the only ones that need to update the original arrays + if (is_fine_pe) then + is = Atm(n)%bd%is + ie = Atm(n)%bd%ie + js = Atm(n)%bd%js + je = Atm(n)%bd%je + + ! SST directly in Atm structure + Atm(n)%ts(is:ie, js:je) = mn_phys%ts(is:ie, js:je) + + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + + if (move_physics) then + ! Surface properties + do k = 1, IPD_Control%lsoil + IPD_Data(nb)%Sfcprop%smc(ix,k) = mn_phys%smc(i,j,k) + IPD_Data(nb)%Sfcprop%stc(ix,k) = mn_phys%stc(i,j,k) + IPD_Data(nb)%Sfcprop%slc(ix,k) = mn_phys%slc(i,j,k) + enddo + + ! EMIS PATCH - Force to positive at all locations. + if (mn_phys%emis_lnd(i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%emis_lnd(ix) = mn_phys%emis_lnd(i,j) + else + IPD_Data(nb)%Sfcprop%emis_lnd(ix) = 0.5 + endif + if (mn_phys%emis_ice(i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%emis_ice(ix) = mn_phys%emis_ice(i,j) + else + IPD_Data(nb)%Sfcprop%emis_ice(ix) = 0.5 + endif + if (mn_phys%emis_wat(i,j) .ge.
0.0) then + IPD_Data(nb)%Sfcprop%emis_wat(ix) = mn_phys%emis_wat(i,j) + else + IPD_Data(nb)%Sfcprop%emis_wat(ix) = 0.5 + endif + + !IPD_Data(nb)%Sfcprop%sfalb_lnd(ix) = mn_phys%sfalb_lnd(i,j) + !IPD_Data(nb)%Sfcprop%sfalb_lnd_bck(ix) = mn_phys%sfalb_lnd_bck(i,j) + !IPD_Data(nb)%Radtend%semis(ix) = mn_phys%semis(i,j) + !IPD_Data(nb)%Sfcprop%semisbase(ix) = mn_phys%semisbase(i,j) + !IPD_Data(nb)%Radtend%sfalb(ix) = mn_phys%sfalb(i,j) + + IPD_Data(nb)%IntDiag%u10m(ix) = mn_phys%u10m(i,j) + IPD_Data(nb)%IntDiag%v10m(ix) = mn_phys%v10m(i,j) + IPD_Data(nb)%Sfcprop%tprcp(ix) = mn_phys%tprcp(i,j) + + do k = 1, IPD_Control%nmtvr + IPD_Data(nb)%Sfcprop%hprime(ix,k) = mn_phys%hprime(i,j,k) + enddo + + IPD_Data(nb)%Sfcprop%lakefrac(ix) = mn_phys%lakefrac(i,j) + IPD_Data(nb)%Sfcprop%lakedepth(ix) = mn_phys%lakedepth(i,j) + + IPD_Data(nb)%Sfcprop%canopy(ix) = mn_phys%canopy(i,j) + IPD_Data(nb)%Sfcprop%vfrac(ix) = mn_phys%vegfrac(i,j) + IPD_Data(nb)%Sfcprop%uustar(ix) = mn_phys%uustar(i,j) + IPD_Data(nb)%Sfcprop%shdmin(ix) = mn_phys%shdmin(i,j) + IPD_Data(nb)%Sfcprop%shdmax(ix) = mn_phys%shdmax(i,j) + + ! Set roughness lengths to physically reasonable values if they have fill value (possible at coastline) + ! sea/land mask array (sea:0,land:1,sea-ice:2) + if (nint(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 1 .and. mn_phys%zorll(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorll(ix) = 82.0 ! + else + IPD_Data(nb)%Sfcprop%zorll(ix) = mn_phys%zorll(i,j) + endif + + if (nint(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 0 .and. mn_phys%zorlw(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorlw(ix) = 83.0 ! + else + IPD_Data(nb)%Sfcprop%zorlw(ix) = mn_phys%zorlw(i,j) + endif + + if (nint(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 0 .and. mn_phys%zorlwav(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorlwav(ix) = 84.0 ! + else + IPD_Data(nb)%Sfcprop%zorlwav(ix) = mn_phys%zorlwav(i,j) + endif + + if (mn_phys%zorl(i,j) .gt. 1e6) then + IPD_Data(nb)%Sfcprop%zorl(ix) = 85.0 + else + IPD_Data(nb)%Sfcprop%zorl(ix) = mn_phys%zorl(i,j) + endif + + IPD_Data(nb)%Sfcprop%tsfco(ix) = mn_phys%tsfco(i,j) + IPD_Data(nb)%Sfcprop%tsfcl(ix) = mn_phys%tsfcl(i,j) + IPD_Data(nb)%Sfcprop%tsfc(ix) = mn_phys%tsfc(i,j) + + ! Set albedo values to physically reasonable values if they have negative fill values. + if (mn_phys%albdirvis_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdirvis_lnd (ix) = mn_phys%albdirvis_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdirvis_lnd (ix) = 0.5 + endif + + if (mn_phys%albdirnir_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdirnir_lnd (ix) = mn_phys%albdirnir_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdirnir_lnd (ix) = 0.5 + endif + + if (mn_phys%albdifvis_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdifvis_lnd (ix) = mn_phys%albdifvis_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdifvis_lnd (ix) = 0.5 + endif + + if (mn_phys%albdifnir_lnd (i,j) .ge. 0.0) then + IPD_Data(nb)%Sfcprop%albdifnir_lnd (ix) = mn_phys%albdifnir_lnd (i,j) + else + IPD_Data(nb)%Sfcprop%albdifnir_lnd (ix) = 0.5 + endif + + ! 
Cloud properties + IPD_Data(nb)%Cldprop%cv(ix) = mn_phys%cv(i,j) + IPD_Data(nb)%Cldprop%cvt(ix) = mn_phys%cvt(i,j) + IPD_Data(nb)%Cldprop%cvb(ix) = mn_phys%cvb(i,j) + + do nv = 1, IPD_Control%ntot2d + IPD_Data(nb)%Tbd%phy_f2d(ix, nv) = mn_phys%phy_f2d(i,j,nv) + enddo + + do k = 1, IPD_Control%levs + do nv = 1, IPD_Control%ntot3d + IPD_Data(nb)%Tbd%phy_f3d(ix, k, nv) = mn_phys%phy_f3d(i,j,k,nv) + enddo + enddo + endif + + if (move_nsst) then + IPD_Data(nb)%Sfcprop%tref(ix) = mn_phys%tref(i,j) + IPD_Data(nb)%Sfcprop%z_c(ix) = mn_phys%z_c(i,j) + IPD_Data(nb)%Sfcprop%c_0(ix) = mn_phys%c_0(i,j) + IPD_Data(nb)%Sfcprop%c_d(ix) = mn_phys%c_d(i,j) + IPD_Data(nb)%Sfcprop%w_0(ix) = mn_phys%w_0(i,j) + IPD_Data(nb)%Sfcprop%w_d(ix) = mn_phys%w_d(i,j) + IPD_Data(nb)%Sfcprop%xt(ix) = mn_phys%xt(i,j) + IPD_Data(nb)%Sfcprop%xs(ix) = mn_phys%xs(i,j) + IPD_Data(nb)%Sfcprop%xu(ix) = mn_phys%xu(i,j) + IPD_Data(nb)%Sfcprop%xv(ix) = mn_phys%xv(i,j) + IPD_Data(nb)%Sfcprop%xz(ix) = mn_phys%xz(i,j) + IPD_Data(nb)%Sfcprop%zm(ix) = mn_phys%zm(i,j) + IPD_Data(nb)%Sfcprop%xtts(ix) = mn_phys%xtts(i,j) + IPD_Data(nb)%Sfcprop%xzts(ix) = mn_phys%xzts(i,j) + IPD_Data(nb)%Sfcprop%d_conv(ix) = mn_phys%d_conv(i,j) + IPD_Data(nb)%Sfcprop%dt_cool(ix) = mn_phys%dt_cool(i,j) + IPD_Data(nb)%Sfcprop%qrain(ix) = mn_phys%qrain(i,j) + endif + + ! Check if stype and vtype are properly set for land points. Set to reasonable values if they have fill values. + if ( (int(IPD_data(nb)%Sfcprop%slmsk(ix)) .eq. 1) ) then + + if (IPD_data(nb)%Sfcprop%vtype(ix) .lt. 0.5) then + IPD_data(nb)%Sfcprop%vtype(ix) = 7 ! Force to grassland + endif + + if (IPD_data(nb)%Sfcprop%stype(ix) .lt. 0.5) then + IPD_data(nb)%Sfcprop%stype(ix) = 3 ! Force to sandy loam + endif + + if (IPD_data(nb)%Sfcprop%vtype_save(ix) .lt. 0.5) then + IPD_data(nb)%Sfcprop%vtype_save(ix) = 7 ! Force to grassland + endif + if (IPD_data(nb)%Sfcprop%stype_save(ix) .lt. 0.5) then + IPD_data(nb)%Sfcprop%stype_save(ix) = 3 ! Force to sandy loam + endif + + endif + enddo + enddo + endif + + end subroutine mn_phys_apply_temp_variables + + + !>@brief The subroutine 'mn_phys_fill_nest_halos_from_parent' transfers data from the coarse grid to the nest edge + !>@details This subroutine must run on parent and nest PEs to complete the data transfers + subroutine mn_phys_fill_nest_halos_from_parent(Atm, IPD_Control, IPD_Data, mn_static, n, child_grid_num, is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data + type(mn_surface_grids), intent(in) :: mn_static !< Static data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + logical, intent(in) :: is_fine_pe !< Is this a nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain for FMS + integer, intent(in) :: nz !< Number of vertical levels + + integer :: position, position_u, position_v + integer :: interp_type, interp_type_u, interp_type_v, interp_type_lmask + integer :: x_refine, y_refine + type(fv_moving_nest_physics_type), pointer :: mn_phys + + interp_type = 1 ! cell-centered A-grid + interp_type_u = 4 ! D-grid + interp_type_v = 4 ! D-grid + interp_type_lmask = 7 !
land mask, cell-centered A-grid + + position = CENTER + position_u = NORTH + position_v = EAST + + x_refine = Atm(child_grid_num)%neststruct%refinement + y_refine = x_refine + + mn_phys => Moving_nest(n)%mn_phys + + ! Fill centered-grid variables + + call fill_nest_halos_from_parent("ts", mn_phys%ts, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + if (move_physics) then + call fill_nest_halos_from_parent("smc", mn_phys%smc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call fill_nest_halos_from_parent("stc", mn_phys%stc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call fill_nest_halos_from_parent("slc", mn_phys%slc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%lsoil) + + call fill_nest_halos_from_parent("phy_f2d", mn_phys%phy_f2d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%ntot2d) + + call fill_nest_halos_from_parent("phy_f3d", mn_phys%phy_f3d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%levs) + + !! Surface variables + + !call fill_nest_halos_from_parent("sfalb_lnd", mn_phys%sfalb_lnd, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + + ! sea/land mask array (sea:0,land:1,sea-ice:2) + + call fill_nest_halos_from_parent_masked("emis_lnd", mn_phys%emis_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + + call fill_nest_halos_from_parent_masked("emis_ice", mn_phys%emis_ice, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 2, 0.5D0) + + call fill_nest_halos_from_parent_masked("emis_wat", mn_phys%emis_wat, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 0, 0.5D0) + + !call fill_nest_halos_from_parent("sfalb_lnd_bck", mn_phys%sfalb_lnd_bck, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + + + !call fill_nest_halos_from_parent("semis", mn_phys%semis, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + !call fill_nest_halos_from_parent("semisbase", mn_phys%semisbase, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + !call fill_nest_halos_from_parent("sfalb", mn_phys%sfalb, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + ! 
Atm(child_grid_num)%neststruct%ind_h, & + ! x_refine, y_refine, & + ! is_fine_pe, nest_domain, position) + + + call fill_nest_halos_from_parent("u10m", mn_phys%u10m, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("v10m", mn_phys%v10m, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("tprcp", mn_phys%tprcp, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent("hprime", mn_phys%hprime, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, IPD_Control%nmtvr) + + call fill_nest_halos_from_parent("lakefrac", mn_phys%lakefrac, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("lakedepth", mn_phys%lakedepth, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent("canopy", mn_phys%canopy, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("vegfrac", mn_phys%vegfrac, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("uustar", mn_phys%uustar, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("shdmin", mn_phys%shdmin, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("shdmax", mn_phys%shdmax, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("zorl", mn_phys%zorl, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent_masked("zorll", mn_phys%zorll, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 86.0D0) + call fill_nest_halos_from_parent_masked("zorlwav", mn_phys%zorlwav, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 0, 77.0D0) + call fill_nest_halos_from_parent_masked("zorlw", mn_phys%zorlw, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 0, 78.0D0) + + call fill_nest_halos_from_parent("tsfco", mn_phys%tsfco, 
interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("tsfcl", mn_phys%tsfcl, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("tsfc", mn_phys%tsfc, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + call fill_nest_halos_from_parent_masked("albdirvis_lnd", mn_phys%albdirvis_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + call fill_nest_halos_from_parent_masked("albdirnir_lnd", mn_phys%albdirnir_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + call fill_nest_halos_from_parent_masked("albdifvis_lnd", mn_phys%albdifvis_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + call fill_nest_halos_from_parent_masked("albdifnir_lnd", mn_phys%albdifnir_lnd, interp_type_lmask, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position, mn_phys%slmsk, 1, 0.5D0) + + + + call fill_nest_halos_from_parent("cv", mn_phys%cv, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("cvt", mn_phys%cvt, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("cvb", mn_phys%cvb, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + endif + + if (move_nsst) then + + call fill_nest_halos_from_parent("tref", mn_phys%tref, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("z_c", mn_phys%z_c, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("c_0", mn_phys%c_0, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("c_d", mn_phys%c_d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("w_0", mn_phys%w_0, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("w_d", mn_phys%w_d, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + 
is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xt", mn_phys%xt, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xs", mn_phys%xs, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xu", mn_phys%xu, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xv", mn_phys%xv, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xz", mn_phys%xz, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("zm", mn_phys%zm, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xtts", mn_phys%xtts, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("xzts", mn_phys%xzts, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("d_conv", mn_phys%d_conv, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("dt_cool", mn_phys%dt_cool, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + call fill_nest_halos_from_parent("qrain", mn_phys%qrain, interp_type, Atm(child_grid_num)%neststruct%wt_h, & + Atm(child_grid_num)%neststruct%ind_h, & + x_refine, y_refine, & + is_fine_pe, nest_domain, position) + + endif + + end subroutine mn_phys_fill_nest_halos_from_parent + + !>@brief The subroutine 'mn_phys_fill_intern_nest_halos' fills the internal nest halos for the physics variables + !>@details This subroutine is only called for the nest PEs. + subroutine mn_phys_fill_intern_nest_halos(moving_nest, IPD_Control, IPD_Data, domain_fine, is_fine_pe) + type(fv_moving_nest_type), target, intent(inout) :: moving_nest !< Single instance of moving nest data + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data + type(domain2d), intent(inout) :: domain_fine !< Domain structure for this nest + logical, intent(in) :: is_fine_pe !< Is nest PE - should be True. Argument is redundant. + + type(fv_moving_nest_physics_type), pointer :: mn_phys + + mn_phys => moving_nest%mn_phys + + call mn_var_fill_intern_nest_halos(mn_phys%ts, domain_fine, is_fine_pe) !!
Skin Temp/SST + if (move_physics) then + call mn_var_fill_intern_nest_halos(mn_phys%smc, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%stc, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%slc, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%phy_f2d, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%phy_f3d, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%emis_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%emis_ice, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%emis_wat, domain_fine, is_fine_pe) + + !call mn_var_fill_intern_nest_halos(mn_phys%sfalb_lnd, domain_fine, is_fine_pe) + !call mn_var_fill_intern_nest_halos(mn_phys%sfalb_lnd_bck, domain_fine, is_fine_pe) + !call mn_var_fill_intern_nest_halos(mn_phys%semis, domain_fine, is_fine_pe) + !call mn_var_fill_intern_nest_halos(mn_phys%semisbase, domain_fine, is_fine_pe) + !call mn_var_fill_intern_nest_halos(mn_phys%sfalb, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%u10m, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%v10m, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tprcp, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%hprime, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%lakefrac, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%lakedepth, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%canopy, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%vegfrac, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%uustar, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%shdmin, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%shdmax, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zorl, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zorll, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zorlwav, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zorlw, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tsfco, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tsfcl, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%tsfc, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%albdirvis_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%albdirnir_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%albdifvis_lnd, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%albdifnir_lnd, domain_fine, is_fine_pe) + + call mn_var_fill_intern_nest_halos(mn_phys%cv, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%cvt, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%cvb, domain_fine, is_fine_pe) + endif + + if (move_nsst) then + call mn_var_fill_intern_nest_halos(mn_phys%tref, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%z_c, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%c_0, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%c_d, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%w_0, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%w_d, domain_fine, is_fine_pe) + call 
mn_var_fill_intern_nest_halos(mn_phys%xt, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xs, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xu, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xv, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xz, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%zm, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xtts, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%xzts, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%d_conv, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%dt_cool, domain_fine, is_fine_pe) + call mn_var_fill_intern_nest_halos(mn_phys%qrain, domain_fine, is_fine_pe) + endif + + end subroutine mn_phys_fill_intern_nest_halos + + !>@brief The subroutine 'mn_phys_shift_data' shifts the variable in the nest, including interpolating at the leading edge + !>@details This subroutine is called for the nest and parent PEs. + subroutine mn_phys_shift_data(Atm, IPD_Control, IPD_Data, n, child_grid_num, wt_h, wt_u, wt_v, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, nz) + type(fv_atmos_type), allocatable, target, intent(inout) :: Atm(:) !< Array of atmospheric data + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(inout) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: n, child_grid_num !< Current grid number, child grid number + real, allocatable, intent(in) :: wt_h(:,:,:), wt_u(:,:,:), wt_v(:,:,:) !< Interpolation weights + integer, intent(in) :: delta_i_c, delta_j_c !< Nest motion in i,j direction + integer, intent(in) :: x_refine, y_refine !< Nest refinement + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + type(nest_domain_type), intent(inout) :: nest_domain !< Nest domain structure + integer, intent(in) :: nz !< Number of vertical levels + + ! Constants for mpp calls + integer :: interp_type = 1 ! cell-centered A-grid + integer :: interp_type_u = 4 ! D-grid + integer :: interp_type_v = 4 ! D-grid + integer :: position = CENTER + integer :: position_u = NORTH + integer :: position_v = EAST + type(fv_moving_nest_physics_type), pointer :: mn_phys + + mn_phys => Moving_nest(n)%mn_phys + + !! Skin temp/SST + call mn_var_shift_data(mn_phys%ts, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + if (move_physics) then + !! Soil variables + call mn_var_shift_data(mn_phys%smc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call mn_var_shift_data(mn_phys%stc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%lsoil) + call mn_var_shift_data(mn_phys%slc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%lsoil) + + !! 
Physics arrays + call mn_var_shift_data(mn_phys%phy_f2d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_control%ntot2d) + + call mn_var_shift_data(mn_phys%phy_f3d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%levs) + + ! Surface variables + + call mn_var_shift_data(mn_phys%emis_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%emis_ice, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%emis_wat, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + + !call mn_var_shift_data(mn_phys%sfalb_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%sfalb_lnd_bck, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%semis, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%semisbase, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + !call mn_var_shift_data(mn_phys%sfalb, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + ! 
delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + + call mn_var_shift_data(mn_phys%u10m, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%v10m, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tprcp, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%hprime, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position, IPD_Control%nmtvr) + call mn_var_shift_data(mn_phys%lakefrac, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%lakedepth, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%canopy, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%vegfrac, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%uustar, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%shdmin, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%shdmax, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorl, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorll, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorlwav, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zorlw, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tsfco, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tsfcl, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%tsfc, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdirvis_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdirnir_lnd, 
interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdifvis_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%albdifnir_lnd, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%cv, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%cvt, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%cvb, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + endif + + if (move_nsst) then + call mn_var_shift_data(mn_phys%tref, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%z_c, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%c_0, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%c_d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%w_0, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%w_d, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xt, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xs, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xu, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xv, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xz, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%zm, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xtts, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%xzts, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call 
mn_var_shift_data(mn_phys%d_conv, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%dt_cool, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + call mn_var_shift_data(mn_phys%qrain, interp_type, wt_h, Atm(child_grid_num)%neststruct%ind_h, & + delta_i_c, delta_j_c, x_refine, y_refine, is_fine_pe, nest_domain, position) + endif + + end subroutine mn_phys_shift_data + + !>@brief The subroutine 'mn_phys_dump_to_netcdf' dumps physics variables to debugging netCDF files + !>@details This subroutine is called for the nest and parent PEs. + subroutine mn_phys_dump_to_netcdf(Atm, Atm_block, IPD_Control, IPD_Data, time_val, file_prefix, is_fine_pe, domain_coarse, domain_fine, nz) + type(fv_atmos_type), intent(in) :: Atm !< Single instance of atmospheric data + type (block_control_type), intent(in) :: Atm_block !< Physics block layout + type(IPD_control_type), intent(in) :: IPD_Control !< Physics metadata + type(IPD_data_type), intent(in) :: IPD_Data(:) !< Physics variable data + integer, intent(in) :: time_val !< Timestep number for filename + character(len=*), intent(in) :: file_prefix !< Prefix for output netCDF filenames + logical, intent(in) :: is_fine_pe !< Is this the nest PE? + type(domain2d), intent(in) :: domain_coarse, domain_fine !< Domain structures for parent and nest + integer, intent(in) :: nz !< Number of vertical levels + + integer :: is, ie, js, je + integer :: nb, blen, i, j, k, ix, nv + integer :: this_pe + + integer :: n_moist + character(len=16) :: out_var_name, phys_var_name + integer :: position = CENTER + + ! Coerce the double precision variables from physics into single precision for debugging netCDF output + ! Does not affect values used in calculations. + ! TODO do we want to dump these as double precision?? + real, allocatable :: smc_pr_local (:,:,:) !< soil moisture content + real, allocatable :: stc_pr_local (:,:,:) !< soil temperature + real, allocatable :: slc_pr_local (:,:,:) !< soil liquid water content + real, allocatable, dimension(:,:) :: sealand_pr_local, deep_soil_t_pr_local, soil_type_pr_local, veg_type_pr_local, slope_type_pr_local, max_snow_alb_pr_local + real, allocatable, dimension(:,:) :: tsfco_pr_local, tsfcl_pr_local, tsfc_pr_local, vegfrac_pr_local + real, allocatable, dimension(:,:) :: tref_pr_local, c_0_pr_local, xt_pr_local, xu_pr_local, xv_pr_local, ifd_pr_local + real, allocatable, dimension(:,:) :: facsf_pr_local, facwf_pr_local + real, allocatable, dimension(:,:) :: alvsf_pr_local, alvwf_pr_local, alnsf_pr_local, alnwf_pr_local + real, allocatable, dimension(:,:) :: zorl_pr_local, zorll_pr_local, zorlw_pr_local, zorli_pr_local + real, allocatable :: phy_f2d_pr_local (:,:,:) + real, allocatable :: phy_f3d_pr_local (:,:,:,:) + real, allocatable :: lakefrac_pr_local (:,:) !< lake fraction + real, allocatable :: landfrac_pr_local (:,:) !< land fraction + real, allocatable :: emis_lnd_pr_local (:,:) !< emissivity land + + this_pe = mpp_pe() + + ! Skin temp/SST + call mn_var_dump_to_netcdf(Atm%ts, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "SSTK") + ! Terrain height == phis / grav + call mn_var_dump_to_netcdf(Atm%phis / grav, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "orog") + + ! sgh and oro were only fully allocated if fv_land is True + ! 
if false, oro is (1,1), and sgh is not allocated + if ( Atm%flagstruct%fv_land ) then + ! land frac -- called oro in fv_array.F90 + call mn_var_dump_to_netcdf(Atm%oro, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "LFRAC") + ! terrain standard deviation -- called sgh in fv_array.F90 + call mn_var_dump_to_netcdf(Atm%sgh, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "STDDEV") + endif + + is = Atm%bd%is + ie = Atm%bd%ie + js = Atm%bd%js + je = Atm%bd%je + + ! Just allocate compute domain size here for outputs; the nest moving code also has halos added, but we don't need them here. + if (move_physics) then + allocate ( smc_pr_local(is:ie, js:je, IPD_Control%lsoil) ) + allocate ( stc_pr_local(is:ie, js:je, IPD_Control%lsoil) ) + allocate ( slc_pr_local(is:ie, js:je, IPD_Control%lsoil) ) + allocate ( sealand_pr_local(is:ie, js:je) ) + allocate ( lakefrac_pr_local(is:ie, js:je) ) + allocate ( landfrac_pr_local(is:ie, js:je) ) + allocate ( emis_lnd_pr_local(is:ie, js:je) ) + allocate ( phy_f2d_pr_local(is:ie, js:je, IPD_Control%ntot2d) ) + allocate ( phy_f3d_pr_local(is:ie, js:je, IPD_Control%levs, IPD_Control%ntot3d) ) + allocate ( tsfco_pr_local(is:ie, js:je) ) + allocate ( tsfcl_pr_local(is:ie, js:je) ) + allocate ( tsfc_pr_local(is:ie, js:je) ) + allocate ( vegfrac_pr_local(is:ie, js:je) ) + allocate ( alvsf_pr_local(is:ie, js:je) ) + allocate ( alvwf_pr_local(is:ie, js:je) ) + allocate ( alnsf_pr_local(is:ie, js:je) ) + allocate ( alnwf_pr_local(is:ie, js:je) ) + allocate ( deep_soil_t_pr_local(is:ie, js:je) ) + allocate ( soil_type_pr_local(is:ie, js:je) ) + !allocate ( veg_frac_pr_local(is:ie, js:je) ) + allocate ( veg_type_pr_local(is:ie, js:je) ) + allocate ( slope_type_pr_local(is:ie, js:je) ) + allocate ( max_snow_alb_pr_local(is:ie, js:je) ) + allocate ( facsf_pr_local(is:ie, js:je) ) + allocate ( facwf_pr_local(is:ie, js:je) ) + allocate ( zorl_pr_local(is:ie, js:je) ) + allocate ( zorll_pr_local(is:ie, js:je) ) + allocate ( zorlw_pr_local(is:ie, js:je) ) + allocate ( zorli_pr_local(is:ie, js:je) ) + endif + + if (move_nsst) then + allocate ( tref_pr_local(is:ie, js:je) ) + allocate ( c_0_pr_local(is:ie, js:je) ) + allocate ( xt_pr_local(is:ie, js:je) ) + allocate ( xu_pr_local(is:ie, js:je) ) + allocate ( xv_pr_local(is:ie, js:je) ) + allocate ( ifd_pr_local(is:ie, js:je) ) + endif + + if (move_physics) then + smc_pr_local = +99999.9 + stc_pr_local = +99999.9 + slc_pr_local = +99999.9 + sealand_pr_local = +99999.9 + lakefrac_pr_local = +99999.9 + landfrac_pr_local = +99999.9 + emis_lnd_pr_local = +99999.9 + phy_f2d_pr_local = +99999.9 + phy_f3d_pr_local = +99999.9 + tsfco_pr_local = +99999.9 + tsfcl_pr_local = +99999.9 + tsfc_pr_local = +99999.9 + vegfrac_pr_local = +99999.9 + alvsf_pr_local = +99999.9 + alvwf_pr_local = +99999.9 + alnsf_pr_local = +99999.9 + alnwf_pr_local = +99999.9 + endif + if (move_nsst) then + tref_pr_local = +99999.9 + c_0_pr_local = +99999.9 + xt_pr_local = +99999.9 + xu_pr_local = +99999.9 + xv_pr_local = +99999.9 + ifd_pr_local = +99999.9 + endif + + do nb = 1,Atm_block%nblks + blen = Atm_block%blksz(nb) + do ix = 1, blen + i = Atm_block%index(nb)%ii(ix) + j = Atm_block%index(nb)%jj(ix) + + if (move_physics) then + do k = 1, IPD_Control%lsoil + ! 
Use real() to lower the precision + smc_pr_local(i,j,k) = real(IPD_Data(nb)%Sfcprop%smc(ix,k)) + stc_pr_local(i,j,k) = real(IPD_Data(nb)%Sfcprop%stc(ix,k)) + slc_pr_local(i,j,k) = real(IPD_Data(nb)%Sfcprop%slc(ix,k)) + enddo + + sealand_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%slmsk(ix)) + lakefrac_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%lakefrac(ix)) + landfrac_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%landfrac(ix)) + emis_lnd_pr_local(i,j) = real(IPD_Data(nb)%Sfcprop%emis_lnd(ix)) + deep_soil_t_pr_local(i, j) = IPD_data(nb)%Sfcprop%tg3(ix) + soil_type_pr_local(i, j) = IPD_data(nb)%Sfcprop%stype(ix) + !veg_frac_pr_local(i, j) = IPD_data(nb)%Sfcprop%vfrac(ix) + veg_type_pr_local(i, j) = IPD_data(nb)%Sfcprop%vtype(ix) + slope_type_pr_local(i, j) = IPD_data(nb)%Sfcprop%slope(ix) + facsf_pr_local(i, j) = IPD_data(nb)%Sfcprop%facsf(ix) + facwf_pr_local(i, j) = IPD_data(nb)%Sfcprop%facwf(ix) + zorl_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorl(ix) + zorlw_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorlw(ix) + zorll_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorll(ix) + zorli_pr_local(i, j) = IPD_data(nb)%Sfcprop%zorli(ix) + max_snow_alb_pr_local(i, j) = IPD_data(nb)%Sfcprop%snoalb(ix) + tsfco_pr_local(i, j) = IPD_data(nb)%Sfcprop%tsfco(ix) + tsfcl_pr_local(i, j) = IPD_data(nb)%Sfcprop%tsfcl(ix) + tsfc_pr_local(i, j) = IPD_data(nb)%Sfcprop%tsfc(ix) + vegfrac_pr_local(i, j) = IPD_data(nb)%Sfcprop%vfrac(ix) + alvsf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alvsf(ix) + alvwf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alvwf(ix) + alnsf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alnsf(ix) + alnwf_pr_local(i, j) = IPD_data(nb)%Sfcprop%alnwf(ix) + + do nv = 1, IPD_Control%ntot2d + ! Use real() to lower the precision + phy_f2d_pr_local(i,j,nv) = real(IPD_Data(nb)%Tbd%phy_f2d(ix, nv)) + enddo + + do k = 1, IPD_Control%levs + do nv = 1, IPD_Control%ntot3d + ! 
Use real() to lower the precision + phy_f3d_pr_local(i,j,k,nv) = real(IPD_Data(nb)%Tbd%phy_f3d(ix, k, nv)) + enddo + enddo + endif + + if (move_nsst) then + tref_pr_local(i,j) = IPD_data(nb)%Sfcprop%tref(ix) + c_0_pr_local(i,j) = IPD_data(nb)%Sfcprop%c_0(ix) + xt_pr_local(i,j) = IPD_data(nb)%Sfcprop%xt(ix) + xu_pr_local(i,j) = IPD_data(nb)%Sfcprop%xu(ix) + xv_pr_local(i,j) = IPD_data(nb)%Sfcprop%xv(ix) + ifd_pr_local(i,j) = IPD_data(nb)%Sfcprop%ifd(ix) + endif + enddo + enddo + + if (move_physics) then + !call mn_var_dump_to_netcdf(stc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%lsoil, time_val, Atm%global_tile, file_prefix, "SOILT") + !call mn_var_dump_to_netcdf(smc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%lsoil, time_val, Atm%global_tile, file_prefix, "SOILM") + !call mn_var_dump_to_netcdf(slc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%lsoil, time_val, Atm%global_tile, file_prefix, "SOILL") + call mn_var_dump_to_netcdf(sealand_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "LMASK") + call mn_var_dump_to_netcdf(lakefrac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "LAKEFRAC") + call mn_var_dump_to_netcdf(landfrac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "LANDFRAC") + call mn_var_dump_to_netcdf(emis_lnd_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "EMISLAND") + call mn_var_dump_to_netcdf(deep_soil_t_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "DEEPSOIL") + call mn_var_dump_to_netcdf(soil_type_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "SOILTP") + !call mn_var_dump_to_netcdf(veg_frac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "VEGFRAC") + call mn_var_dump_to_netcdf(veg_type_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "VEGTYPE") + call mn_var_dump_to_netcdf(slope_type_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "SLOPE") + call mn_var_dump_to_netcdf(max_snow_alb_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "SNOWALB") + call mn_var_dump_to_netcdf(tsfco_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "TSFCO") + call mn_var_dump_to_netcdf(tsfcl_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "TSFCL") + call mn_var_dump_to_netcdf(tsfc_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "TSFC") + call mn_var_dump_to_netcdf(vegfrac_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "VEGFRAC") + call mn_var_dump_to_netcdf(alvsf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ALVSF") + call mn_var_dump_to_netcdf(alvwf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ALVWF") + call mn_var_dump_to_netcdf(alnsf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ALNSF") + call mn_var_dump_to_netcdf(alnwf_pr_local, 
is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ALNWF") + call mn_var_dump_to_netcdf(facsf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "FACSF") + call mn_var_dump_to_netcdf(facwf_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "FACWF") + call mn_var_dump_to_netcdf(zorl_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ZORL") + call mn_var_dump_to_netcdf(zorlw_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ZORLW") + call mn_var_dump_to_netcdf(zorll_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ZORLL") + call mn_var_dump_to_netcdf(zorli_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "ZORLI") + + do nv = 1, IPD_Control%ntot2d + write (phys_var_name, "(A4,I0.3)") 'PH2D', nv + !call mn_var_dump_to_netcdf(phy_f2d_pr_local(:,:,nv), is_fine_pe, domain_coarse, domain_fine, position, 1, & + ! time_val, Atm%global_tile, file_prefix, phys_var_name) + enddo + + do nv = 1, IPD_Control%ntot3d + write (phys_var_name, "(A4,I0.3)") 'PH3D', nv + !call mn_var_dump_to_netcdf(phy_f3d_pr_local(:,:,:,nv), is_fine_pe, domain_coarse, domain_fine, position, IPD_Control%levs, & + ! time_val, Atm%global_tile, file_prefix, phys_var_name) + enddo + endif + + if (move_nsst) then + call mn_var_dump_to_netcdf(tref_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "TREF") + call mn_var_dump_to_netcdf(c_0_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "C_0") + call mn_var_dump_to_netcdf(xt_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "XT") + call mn_var_dump_to_netcdf(xu_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "XU") + call mn_var_dump_to_netcdf(xv_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "XV") + call mn_var_dump_to_netcdf(ifd_pr_local, is_fine_pe, domain_coarse, domain_fine, position, time_val, Atm%global_tile, file_prefix, "IFD") + endif + + if (move_physics) then + deallocate(smc_pr_local) + deallocate(stc_pr_local) + deallocate(slc_pr_local) + deallocate(lakefrac_pr_local) + deallocate(landfrac_pr_local) + deallocate(emis_lnd_pr_local) + deallocate(sealand_pr_local, deep_soil_t_pr_local, soil_type_pr_local, veg_type_pr_local, max_snow_alb_pr_local) + deallocate(tsfco_pr_local, tsfcl_pr_local, tsfc_pr_local, vegfrac_pr_local) + deallocate(alvsf_pr_local, alvwf_pr_local, alnsf_pr_local, alnwf_pr_local) + deallocate(facsf_pr_local, facwf_pr_local) + deallocate(zorl_pr_local, zorlw_pr_local, zorll_pr_local, zorli_pr_local) + deallocate(phy_f2d_pr_local) + deallocate(phy_f3d_pr_local) + endif + + if (move_nsst) deallocate(tref_pr_local, c_0_pr_local, xt_pr_local, xu_pr_local, xv_pr_local, ifd_pr_local) + + end subroutine mn_phys_dump_to_netcdf + +end module fv_moving_nest_physics_mod diff --git a/moving_nest/fv_moving_nest_types.F90 b/moving_nest/fv_moving_nest_types.F90 new file mode 100644 index 000000000..0a2a088fc --- /dev/null +++ b/moving_nest/fv_moving_nest_types.F90 @@ -0,0 +1,626 @@ +!*********************************************************************** +!* GNU General Public License * 
+!* This file is a part of fvGFS. * +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!*********************************************************************** +!> @file +!! @brief Provides data structures for moving nest functionality +!! @author W. Ramstrom, AOML/HRD 03/24/2022 +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + +module fv_moving_nest_types_mod + +#include + +#ifdef GFS_TYPES + use GFS_typedefs, only: kind_phys +#else + use IPD_typedefs, only: kind_phys => IPD_kind_phys +#endif + + use fms_mod, only: check_nml_error + use fv_arrays_mod, only: fv_atmos_type + use fv_mp_mod, only: MAX_NNEST + use mpp_mod, only: input_nml_file, mpp_pe, read_input_nml + + implicit none + + type fv_moving_nest_flag_type + ! Moving Nest Namelist Variables + logical :: is_moving_nest = .false. + character(len=120) :: surface_dir = "INPUT/moving_nest" + integer :: terrain_smoother = 1 + integer :: vortex_tracker = 0 + integer :: ntrack = 1 + integer :: corral_x = 5 + integer :: corral_y = 5 + + integer :: outatcf_lun = 600 + + ! Moving nest related variables + integer :: move_cd_x = 0 + integer :: move_cd_y = 0 + logical :: do_move = .false. + end type fv_moving_nest_flag_type + + ! Encapsulates the grid definition data, such as read from the netCDF files + type grid_geometry + integer :: nx, ny, nxp, nyp + + real(kind=kind_phys), allocatable :: lats(:,:) + real(kind=kind_phys), allocatable :: lons(:,:) + + !real, allocatable :: dx(:,:) + !real, allocatable :: dy(:,:) + real(kind=kind_phys), allocatable :: area(:,:) + end type grid_geometry + + type fv_moving_nest_prog_type + real, _ALLOCATABLE :: delz(:,:,:) _NULL !< layer thickness (meters) + end type fv_moving_nest_prog_type + + ! TODO deallocate these at end of model run. They are only allocated once, at first nest move, inside mn_static_read_hires(). + ! Note these are only 32 bits for now; matching the precision of the input netCDF files + ! though the model generally handles physics variables with 64 bit precision + type mn_surface_grids + real, allocatable :: orog_grid(:,:) _NULL ! orography -- raw or filtered depending on namelist option, in meters + real, allocatable :: orog_std_grid(:,:) _NULL ! terrain standard deviation for gravity wave drag, in meters (?) + real, allocatable :: ls_mask_grid(:,:) _NULL ! land sea mask -- 0 for ocean/lakes, 1, for land. Perhaps 2 for sea ice. + real, allocatable :: land_frac_grid(:,:) _NULL ! Continuous land fraction - 0.0 ocean, 0.5 half of each, 1.0 all land + + real, allocatable :: parent_orog_grid(:,:) _NULL ! parent orography -- only used for terrain_smoother=1. + ! raw or filtered depending on namelist option,in meters + + ! 
Soil variables + real, allocatable :: deep_soil_temp_grid(:,:) _NULL ! deep soil temperature at 5m, in degrees K + real, allocatable :: soil_type_grid(:,:) _NULL ! STATSGO soil type + + ! Vegetation variables + real, allocatable :: veg_frac_grid(:,:) _NULL ! vegetation fraction + real, allocatable :: veg_type_grid(:,:) _NULL ! IGBP vegetation type + real, allocatable :: veg_greenness_grid(:,:) _NULL ! NESDIS vegetation greenness; netCDF file has monthly values + + ! Orography variables + real, allocatable :: slope_type_grid(:,:) _NULL ! legacy 1 degree GFS slope type + + ! Albedo variables + real, allocatable :: max_snow_alb_grid(:,:) _NULL ! max snow albedo + real, allocatable :: facsf_grid(:,:) _NULL ! fractional coverage with strong cosz dependency + real, allocatable :: facwf_grid(:,:) _NULL ! fractional coverage with weak cosz dependency + + ! Snow free albedo + ! strong cosz angle dependence = black sky + ! weak cosz angle dependence = white sky + ! From the chgres code in static_data.F90, we see the linkage of variable names: + ! type(esmf_field), public :: alvsf_target_grid !< visible black sky albedo + ! type(esmf_field), public :: alvwf_target_grid !< visible white sky albedo + ! type(esmf_field), public :: alnsf_target_grid !< near ir black sky albedo + ! type(esmf_field), public :: alnwf_target_grid !< near ir white sky albedo + + real, allocatable :: alvsf_grid(:,:) _NULL ! Visible black sky albedo; netCDF file has monthly values + real, allocatable :: alvwf_grid(:,:) _NULL ! Visible white sky albedo; netCDF file has monthly values + real, allocatable :: alnsf_grid(:,:) _NULL ! Near IR black sky albedo; netCDF file has monthly values + real, allocatable :: alnwf_grid(:,:) _NULL ! Near IR white sky albedo; netCDF file has monthly values + + end type mn_surface_grids + + type fv_moving_nest_physics_type + real, _ALLOCATABLE :: ts(:,:) _NULL !< 2D skin temperature/SST + real, _ALLOCATABLE :: slmsk(:,:) _NULL !< land sea mask -- 0 for ocean/lakes, 1, for land. Perhaps 2 for sea ice. + real (kind=kind_phys), _ALLOCATABLE :: smc (:,:,:) _NULL !< soil moisture content + real (kind=kind_phys), _ALLOCATABLE :: stc (:,:,:) _NULL !< soil temperature + real (kind=kind_phys), _ALLOCATABLE :: slc (:,:,:) _NULL !< soil liquid water content + + real (kind=kind_phys), _ALLOCATABLE :: u10m (:,:) _NULL !< 10m u wind (a-grid?) + real (kind=kind_phys), _ALLOCATABLE :: v10m (:,:) _NULL !< 10m v wind (a-grid?) + real (kind=kind_phys), _ALLOCATABLE :: hprime (:,:,:) _NULL !< orographic metrics (maybe standard deviation?) + + real (kind=kind_phys), _ALLOCATABLE :: tprcp (:,:) _NULL !< total (of all precip types) precipitation rate + + real (kind=kind_phys), _ALLOCATABLE :: zorl (:,:) _NULL !< roughness length + real (kind=kind_phys), _ALLOCATABLE :: zorll (:,:) _NULL !< land roughness length + !real (kind=kind_phys), _ALLOCATABLE :: zorli (:,:) _NULL !< ice surface roughness length ! TODO do we need this? 
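        ! Note: the fields in this type appear to mirror the corresponding IPD/GFS
        ! Sfcprop (and Tbd) physics fields; they are held here as halo-padded
        ! working copies so the nest-motion code can shift them along with the
        ! dynamics fields.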
+ real (kind=kind_phys), _ALLOCATABLE :: zorlw (:,:) _NULL !< wave surface roughness length + real (kind=kind_phys), _ALLOCATABLE :: zorlwav (:,:) _NULL !< wave surface roughness in cm derived from wave model + + real (kind=kind_phys), _ALLOCATABLE :: sfalb_lnd(:,:) _NULL !< surface albedo over land for LSM + real (kind=kind_phys), _ALLOCATABLE :: emis_lnd(:,:) _NULL !< surface emissivity over land for LSM + real (kind=kind_phys), _ALLOCATABLE :: emis_ice(:,:) _NULL !< surface emissivity over ice for LSM + real (kind=kind_phys), _ALLOCATABLE :: emis_wat(:,:) _NULL !< surface emissivity over water for LSM + real (kind=kind_phys), _ALLOCATABLE :: sfalb_lnd_bck(:,:) _NULL !< snow-free albedo over land + + !real (kind=kind_phys), _ALLOCATABLE :: semis(:,:) _NULL !< surface lw emissivity in fraction + !real (kind=kind_phys), _ALLOCATABLE :: semisbase(:,:) _NULL !< background surface emissivity + !real (kind=kind_phys), _ALLOCATABLE :: sfalb(:,:) _NULL !< mean surface diffused sw albedo + + real (kind=kind_phys), _ALLOCATABLE :: alvsf(:,:) _NULL !< visible black sky albedo + real (kind=kind_phys), _ALLOCATABLE :: alvwf(:,:) _NULL !< visible white sky albedo + real (kind=kind_phys), _ALLOCATABLE :: alnsf(:,:) _NULL !< near IR black sky albedo + real (kind=kind_phys), _ALLOCATABLE :: alnwf(:,:) _NULL !< near IR white sky albedo + + real (kind=kind_phys), _ALLOCATABLE :: albdirvis_lnd(:,:) _NULL !< + real (kind=kind_phys), _ALLOCATABLE :: albdirnir_lnd(:,:) _NULL !< + real (kind=kind_phys), _ALLOCATABLE :: albdifvis_lnd(:,:) _NULL !< + real (kind=kind_phys), _ALLOCATABLE :: albdifnir_lnd(:,:) _NULL !< + + real (kind=kind_phys), _ALLOCATABLE :: facsf(:,:) _NULL !< fractional coverage for strong zenith angle albedo + real (kind=kind_phys), _ALLOCATABLE :: facwf(:,:) _NULL !< fractional coverage for strong zenith angle albedo + + real (kind=kind_phys), _ALLOCATABLE :: lakefrac (:,:) _NULL !< lake fraction [0:1] + real (kind=kind_phys), _ALLOCATABLE :: lakedepth (:,:) _NULL !< lake depth [ m ] + + real (kind=kind_phys), _ALLOCATABLE :: canopy (:,:) _NULL !< canopy water content + real (kind=kind_phys), _ALLOCATABLE :: vegfrac (:,:) _NULL !< vegetation fraction + real (kind=kind_phys), _ALLOCATABLE :: uustar (:,:) _NULL !< u* wind in similarity theory + real (kind=kind_phys), _ALLOCATABLE :: shdmin (:,:) _NULL !< min fractional coverage of green vegetation + real (kind=kind_phys), _ALLOCATABLE :: shdmax (:,:) _NULL !< max fractional coverage of green vegetation + real (kind=kind_phys), _ALLOCATABLE :: tsfco (:,:) _NULL !< surface temperature ocean + real (kind=kind_phys), _ALLOCATABLE :: tsfcl (:,:) _NULL !< surface temperature land + real (kind=kind_phys), _ALLOCATABLE :: tsfc (:,:) _NULL !< surface temperature + !real (kind=kind_phys), _ALLOCATABLE :: tsfc_radtime (:,:) _NULL !< surface temperature on radiative timestep + + real (kind=kind_phys), _ALLOCATABLE :: cv (:,:) _NULL !< fraction of convective cloud + real (kind=kind_phys), _ALLOCATABLE :: cvt (:,:) _NULL !< convective cloud top pressure + real (kind=kind_phys), _ALLOCATABLE :: cvb (:,:) _NULL !< convective cloud bottom pressure + + real (kind=kind_phys), _ALLOCATABLE :: phy_f2d (:,:,:) _NULL !< 2D physics variables + real (kind=kind_phys), _ALLOCATABLE :: phy_f3d(:,:,:,:) _NULL !< 3D physics variables + + ! 
NSST Variables + + real (kind=kind_phys), _ALLOCATABLE :: tref (:,:) _NULL !< reference temperature for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: z_c (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: c_0 (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: c_d (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: w_0 (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: w_d (:,:) _NULL !< coefficient for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xt (:,:) _NULL !< heat content for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xs (:,:) _NULL !< salinity for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xu (:,:) _NULL !< u current constant for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xv (:,:) _NULL !< v current constant for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xz (:,:) _NULL !< DTL thickness for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: zm (:,:) _NULL !< MXL for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xtts (:,:) _NULL !< d(xt)/d(ts) for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: xzts (:,:) _NULL !< d(xz)/d(ts) for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: d_conv (:,:) _NULL !< think of free convection layer for NSSTM + ! real (kind=kind_phys), _ALLOCATABLE :: ifd (:,:) _NULL !< index to start DTM run for NSSTM ! TODO Probably can't interpolate an index. + ! IFD values are 0 for land, and 1 for oceans/lakes -- reverse of the land sea mask + ! Land Sea Mask has values of 0 for oceans/lakes, 1 for land, 2 for sea ice + real (kind=kind_phys), _ALLOCATABLE :: dt_cool (:,:) _NULL !< sub-layer cooling amount for NSSTM + real (kind=kind_phys), _ALLOCATABLE :: qrain (:,:) _NULL !< sensible heat flux due to rainfall for NSSTM + + end type fv_moving_nest_physics_type + + type fv_moving_nest_type + type(fv_moving_nest_flag_type) :: mn_flag ! Mostly namelist variables + type(mn_surface_grids) :: mn_static + type(fv_moving_nest_prog_type) :: mn_prog + type(fv_moving_nest_physics_type) :: mn_phys + + type(grid_geometry) :: parent_geo + type(grid_geometry) :: fp_super_tile_geo + end type fv_moving_nest_type + + ! Moving Nest Namelist Variables + logical, dimension(MAX_NNEST) :: is_moving_nest = .False. + character(len=120) :: surface_dir = "INPUT/moving_nest" + integer, dimension(MAX_NNEST) :: terrain_smoother = 1 ! 0 -- all high-resolution data, 1 - static nest smoothing algorithm with blending zone of 5 points, 2 - blending zone of 10 points, 5 - 5 point smoother, 9 - 9 point smoother + integer, dimension(MAX_NNEST) :: vortex_tracker = 0 ! 0 - not a moving nest, tracker not needed + ! 1 - prescribed nest moving + ! 2 - following child domain center + ! 3 - tracking Min MSLP + ! 6 - simplified version of GFDL tracker, adopted from HWRF's internal vortex tracker. + ! 7 - nearly the full storm tracking algorithm from GFDL vortex tracker. The only part that is missing is the part that gives up when the storm dissipates, which is left out intentionally. Adopted from HWRF's internal vortex tracker. + integer, dimension(MAX_NNEST) :: ntrack = 1 ! number of dt_atmos steps to call the vortex tracker, tracker time step = ntrack*dt_atmos + integer, dimension(MAX_NNEST) :: move_cd_x = 0 ! the number of parent domain grid cells to move in i direction + integer, dimension(MAX_NNEST) :: move_cd_y = 0 ! the number of parent domain grid cells to move in j direction + ! used to control prescribed nest moving, when vortex_tracker=1 + ! 
the move happens every ntrack*dt_atmos seconds + ! positive is to move in increasing i and j direction, and + ! negative is to move in decreasing i and j direction. + ! 0 means no move. The limitation is to move only 1 grid cell at each move. + integer, dimension(MAX_NNEST) :: corral_x = 5 ! Minimum parent gridpoints on each side of nest in i direction + integer, dimension(MAX_NNEST) :: corral_y = 5 ! Minimum parent gridpoints on each side of nest in j direction + + integer, dimension(MAX_NNEST) :: outatcf_lun = 600 ! base fortran unit number to write out the partial atcfunix file from the internal tracker + + type(fv_moving_nest_type), _ALLOCATABLE, target :: Moving_nest(:) + +contains + + subroutine fv_moving_nest_init(Atm, this_grid) + type(fv_atmos_type), allocatable, intent(in) :: Atm(:) + integer, intent(in) :: this_grid + + integer :: n, ngrids + + ! Allocate the array of fv_moving_nest_type structures of the proper length + allocate(Moving_nest(size(Atm))) + + ! Configure namelist variables + + ngrids = size(Atm) + + call read_input_nml(Atm(1)%nml_filename) !re-reads top level file into internal namelist + + ! Read in namelist + + call read_namelist_moving_nest_nml + + do n=1,ngrids + if (Atm(n)%neststruct%nested) then + Moving_nest(n)%mn_flag%is_moving_nest = is_moving_nest(n) + Moving_nest(n)%mn_flag%surface_dir = trim(surface_dir) + Moving_nest(n)%mn_flag%terrain_smoother = terrain_smoother(n) + Moving_nest(n)%mn_flag%vortex_tracker = vortex_tracker(n) + Moving_nest(n)%mn_flag%ntrack = ntrack(n) + Moving_nest(n)%mn_flag%move_cd_x = move_cd_x(n) + Moving_nest(n)%mn_flag%move_cd_y = move_cd_y(n) + Moving_nest(n)%mn_flag%corral_x = corral_x(n) + Moving_nest(n)%mn_flag%corral_y = corral_y(n) + Moving_nest(n)%mn_flag%outatcf_lun = outatcf_lun(n) + else + Moving_nest(n)%mn_flag%is_moving_nest = .false. 
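        ! Grids that are not nests can never move, so the motion and tracker
        ! controls are forced to inert defaults below, independent of the
        ! namelist settings copied in the branch above.
        !
        ! For reference, the per-nest settings come from the fv_moving_nest_nml
        ! namelist read earlier in this routine; a purely illustrative entry for
        ! a two-grid run with one moving nest (values are examples only, one
        ! entry per grid) might look like:
        !   &fv_moving_nest_nml
        !     is_moving_nest = .false., .true.
        !     vortex_tracker = 0, 7
        !     ntrack         = 1, 2
        !     corral_x       = 5, 5
        !     corral_y       = 5, 5
        !   /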
+ Moving_nest(n)%mn_flag%vortex_tracker = 0 + Moving_nest(n)%mn_flag%ntrack = 1 + Moving_nest(n)%mn_flag%move_cd_x = 0 + Moving_nest(n)%mn_flag%move_cd_y = 0 + Moving_nest(n)%mn_flag%corral_x = 5 + Moving_nest(n)%mn_flag%corral_y = 5 + Moving_nest(n)%mn_flag%outatcf_lun = 600 + endif + enddo + + + call read_input_nml(Atm(this_grid)%nml_filename) !re-reads into internal namelist + + + end subroutine fv_moving_nest_init + + subroutine read_namelist_moving_nest_nml + integer :: f_unit, ios, ierr + namelist /fv_moving_nest_nml/ surface_dir, is_moving_nest, terrain_smoother, & + vortex_tracker, ntrack, move_cd_x, move_cd_y, corral_x, corral_y, outatcf_lun + +#ifdef INTERNAL_FILE_NML + read (input_nml_file,fv_moving_nest_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_moving_nest_nml') +#else + f_unit=open_namelist_file() + rewind (f_unit) + read (f_unit,fv_moving_nest_nml,iostat=ios) + ierr = check_nml_error(ios,'fv_moving_nest_nml') + call close_file(f_unit) +#endif + + end subroutine read_namelist_moving_nest_nml + + subroutine deallocate_fv_moving_nests(n) + integer, intent(in) :: n + + integer :: i + + do i=1,n + call deallocate_fv_moving_nest(i) + enddo + deallocate(Moving_nest) + end subroutine deallocate_fv_moving_nests + + subroutine deallocate_fv_moving_nest(n) + integer, intent(in) :: n + + call deallocate_fv_moving_nest_prog_type(Moving_nest(n)%mn_prog) + call deallocate_fv_moving_nest_physics_type(Moving_nest(n)%mn_phys) + + end subroutine deallocate_fv_moving_nest + + + subroutine allocate_fv_moving_nest_prog_type(isd, ied, jsd, jed, npz, mn_prog) + integer, intent(in) :: isd, ied, jsd, jed, npz + type(fv_moving_nest_prog_type), intent(inout) :: mn_prog + + allocate ( mn_prog%delz(isd:ied, jsd:jed, 1:npz) ) + mn_prog%delz = +99999.9 + + end subroutine allocate_fv_moving_nest_prog_type + + subroutine deallocate_fv_moving_nest_prog_type(mn_prog) + type(fv_moving_nest_prog_type), intent(inout) :: mn_prog + + if (allocated(mn_prog%delz)) deallocate(mn_prog%delz) + + end subroutine deallocate_fv_moving_nest_prog_type + + subroutine allocate_fv_moving_nest_physics_type(isd, ied, jsd, jed, npz, move_physics, move_nsst, lsoil, nmtvr, levs, ntot2d, ntot3d, mn_phys) + integer, intent(in) :: isd, ied, jsd, jed, npz + logical, intent(in) :: move_physics, move_nsst + integer, intent(in) :: lsoil, nmtvr, levs, ntot2d, ntot3d ! From IPD_Control + type(fv_moving_nest_physics_type), intent(inout) :: mn_phys + + ! 
The local/temporary variables need to be allocated to the larger data (compute + halos) domain so that the nest motion code has halos to use + allocate ( mn_phys%ts(isd:ied, jsd:jed) ) + + if (move_physics) then + allocate ( mn_phys%slmsk(isd:ied, jsd:jed) ) + allocate ( mn_phys%smc(isd:ied, jsd:jed, lsoil) ) + allocate ( mn_phys%stc(isd:ied, jsd:jed, lsoil) ) + allocate ( mn_phys%slc(isd:ied, jsd:jed, lsoil) ) + + allocate ( mn_phys%sfalb_lnd(isd:ied, jsd:jed) ) + allocate ( mn_phys%emis_lnd(isd:ied, jsd:jed) ) + allocate ( mn_phys%emis_ice(isd:ied, jsd:jed) ) + allocate ( mn_phys%emis_wat(isd:ied, jsd:jed) ) + allocate ( mn_phys%sfalb_lnd_bck(isd:ied, jsd:jed) ) + + !allocate ( mn_phys%semis(isd:ied, jsd:jed) ) + !allocate ( mn_phys%semisbase(isd:ied, jsd:jed) ) + !allocate ( mn_phys%sfalb(isd:ied, jsd:jed) ) + + allocate ( mn_phys%u10m(isd:ied, jsd:jed) ) + allocate ( mn_phys%v10m(isd:ied, jsd:jed) ) + allocate ( mn_phys%tprcp(isd:ied, jsd:jed) ) + + allocate ( mn_phys%hprime(isd:ied, jsd:jed, nmtvr) ) + + allocate ( mn_phys%zorl(isd:ied, jsd:jed) ) + allocate ( mn_phys%zorll(isd:ied, jsd:jed) ) + allocate ( mn_phys%zorlwav(isd:ied, jsd:jed) ) + allocate ( mn_phys%zorlw(isd:ied, jsd:jed) ) + + allocate ( mn_phys%alvsf(isd:ied, jsd:jed) ) + allocate ( mn_phys%alvwf(isd:ied, jsd:jed) ) + allocate ( mn_phys%alnsf(isd:ied, jsd:jed) ) + allocate ( mn_phys%alnwf(isd:ied, jsd:jed) ) + + allocate ( mn_phys%facsf(isd:ied, jsd:jed) ) + allocate ( mn_phys%facwf(isd:ied, jsd:jed) ) + + allocate ( mn_phys%lakefrac(isd:ied, jsd:jed) ) + allocate ( mn_phys%lakedepth(isd:ied, jsd:jed) ) + + allocate ( mn_phys%canopy(isd:ied, jsd:jed) ) + allocate ( mn_phys%vegfrac(isd:ied, jsd:jed) ) + allocate ( mn_phys%uustar(isd:ied, jsd:jed) ) + allocate ( mn_phys%shdmin(isd:ied, jsd:jed) ) + allocate ( mn_phys%shdmax(isd:ied, jsd:jed) ) + allocate ( mn_phys%tsfco(isd:ied, jsd:jed) ) + allocate ( mn_phys%tsfcl(isd:ied, jsd:jed) ) + allocate ( mn_phys%tsfc(isd:ied, jsd:jed) ) + !allocate ( mn_phys%tsfc_radtime(isd:ied, jsd:jed) ) + + + allocate ( mn_phys%albdirvis_lnd (isd:ied, jsd:jed) ) + allocate ( mn_phys%albdirnir_lnd (isd:ied, jsd:jed) ) + allocate ( mn_phys%albdifvis_lnd (isd:ied, jsd:jed) ) + allocate ( mn_phys%albdifnir_lnd (isd:ied, jsd:jed) ) + + allocate ( mn_phys%cv(isd:ied, jsd:jed) ) + allocate ( mn_phys%cvt(isd:ied, jsd:jed) ) + allocate ( mn_phys%cvb(isd:ied, jsd:jed) ) + + allocate ( mn_phys%phy_f2d(isd:ied, jsd:jed, ntot2d) ) + allocate ( mn_phys%phy_f3d(isd:ied, jsd:jed, levs, ntot3d) ) + end if + + if (move_nsst) then + allocate ( mn_phys%tref(isd:ied, jsd:jed) ) + allocate ( mn_phys%z_c(isd:ied, jsd:jed) ) + allocate ( mn_phys%c_0(isd:ied, jsd:jed) ) + allocate ( mn_phys%c_d(isd:ied, jsd:jed) ) + allocate ( mn_phys%w_0(isd:ied, jsd:jed) ) + allocate ( mn_phys%w_d(isd:ied, jsd:jed) ) + allocate ( mn_phys%xt(isd:ied, jsd:jed) ) + allocate ( mn_phys%xs(isd:ied, jsd:jed) ) + allocate ( mn_phys%xu(isd:ied, jsd:jed) ) + allocate ( mn_phys%xv(isd:ied, jsd:jed) ) + allocate ( mn_phys%xz(isd:ied, jsd:jed) ) + allocate ( mn_phys%zm(isd:ied, jsd:jed) ) + allocate ( mn_phys%xtts(isd:ied, jsd:jed) ) + allocate ( mn_phys%xzts(isd:ied, jsd:jed) ) + allocate ( mn_phys%d_conv(isd:ied, jsd:jed) ) + !allocate ( mn_phys%ifd(isd:ied, jsd:jed) ) + allocate ( mn_phys%dt_cool(isd:ied, jsd:jed) ) + allocate ( mn_phys%qrain(isd:ied, jsd:jed) ) + end if + + mn_phys%ts = +99999.9 + if (move_physics) then + mn_phys%slmsk = +99999.9 + mn_phys%smc = +99999.9 + mn_phys%stc = +99999.9 + mn_phys%slc = +99999.9 + + + 
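      ! The +99999.9 assignments above and below look like sentinel fills: any
      ! point that the nest-motion code never overwrites remains obviously
      ! unphysical, which makes missed halo or interior updates easy to spot.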
mn_phys%sfalb_lnd = +99999.9 + mn_phys%emis_lnd = +99999.9 + mn_phys%emis_ice = +99999.9 + mn_phys%emis_wat = +99999.9 + mn_phys%sfalb_lnd_bck = +99999.9 + + !mn_phys%semis = +99999.9 + !mn_phys%semisbase = +99999.9 + !mn_phys%sfalb = +99999.9 + + mn_phys%u10m = +99999.9 + mn_phys%v10m = +99999.9 + mn_phys%tprcp = +99999.9 + + mn_phys%hprime = +99999.9 + + mn_phys%zorl = +99999.9 + mn_phys%zorll = +99999.9 + mn_phys%zorlwav = +99999.9 + mn_phys%zorlw = +99999.9 + + mn_phys%alvsf = +99999.9 + mn_phys%alvwf = +99999.9 + mn_phys%alnsf = +99999.9 + mn_phys%alnwf = +99999.9 + + mn_phys%facsf = +99999.9 + mn_phys%facwf = +99999.9 + + mn_phys%lakefrac = +99999.9 + mn_phys%lakedepth = +99999.9 + + mn_phys%canopy = +99999.9 + mn_phys%vegfrac = +99999.9 + mn_phys%uustar = +99999.9 + mn_phys%shdmin = +99999.9 + mn_phys%shdmax = +99999.9 + mn_phys%tsfco = +99999.9 + mn_phys%tsfcl = +99999.9 + mn_phys%tsfc = +99999.9 + !mn_phys%tsfc_radtime = +99999.9 + + mn_phys%albdirvis_lnd = +99999.9 + mn_phys%albdirnir_lnd = +99999.9 + mn_phys%albdifvis_lnd = +99999.9 + mn_phys%albdifnir_lnd = +99999.9 + + mn_phys%cv = +99999.9 + mn_phys%cvt = +99999.9 + mn_phys%cvb = +99999.9 + + mn_phys%phy_f2d = +99999.9 + mn_phys%phy_f3d = +99999.9 + end if + + if (move_nsst) then + mn_phys%tref = +99999.9 + mn_phys%z_c = +99999.9 + mn_phys%c_0 = +99999.9 + mn_phys%c_d = +99999.9 + mn_phys%w_0 = +99999.9 + mn_phys%w_d = +99999.9 + mn_phys%xt = +99999.9 + mn_phys%xs = +99999.9 + mn_phys%xu = +99999.9 + mn_phys%xv = +99999.9 + mn_phys%xz = +99999.9 + mn_phys%zm = +99999.9 + mn_phys%xtts = +99999.9 + mn_phys%xzts = +99999.9 + mn_phys%d_conv = +99999.9 + !mn_phys%ifd = +99999.9 + mn_phys%dt_cool = +99999.9 + mn_phys%qrain = +99999.9 + end if + + end subroutine allocate_fv_moving_nest_physics_type + + + subroutine deallocate_fv_moving_nest_physics_type(mn_phys) + type(fv_moving_nest_physics_type), intent(inout) :: mn_phys + + if (allocated(mn_phys%ts)) then + deallocate ( mn_phys%ts ) + else + ! If ts was not allocated, then none of this structure was allocated. + return + end if + + ! 
if move_phys + if (allocated(mn_phys%smc)) then + deallocate( mn_phys%slmsk ) + deallocate( mn_phys%smc ) + deallocate( mn_phys%stc ) + deallocate( mn_phys%slc ) + + deallocate( mn_phys%sfalb_lnd ) + deallocate( mn_phys%emis_lnd ) + deallocate( mn_phys%emis_ice ) + deallocate( mn_phys%emis_wat ) + deallocate( mn_phys%sfalb_lnd_bck ) + + !deallocate( mn_phys%semis ) + !deallocate( mn_phys%semisbase ) + !deallocate( mn_phys%sfalb ) + + deallocate( mn_phys%u10m ) + deallocate( mn_phys%v10m ) + deallocate( mn_phys%tprcp ) + + deallocate( mn_phys%hprime ) + + deallocate( mn_phys%zorl ) + deallocate( mn_phys%zorll ) + deallocate( mn_phys%zorlwav ) + deallocate( mn_phys%zorlw ) + + deallocate( mn_phys%alvsf ) + deallocate( mn_phys%alvwf ) + deallocate( mn_phys%alnsf ) + deallocate( mn_phys%alnwf ) + + deallocate( mn_phys%facsf ) + deallocate( mn_phys%facwf ) + + deallocate( mn_phys%lakefrac ) + deallocate( mn_phys%lakedepth ) + + deallocate( mn_phys%canopy ) + deallocate( mn_phys%vegfrac ) + deallocate( mn_phys%uustar ) + deallocate( mn_phys%shdmin ) + deallocate( mn_phys%shdmax ) + deallocate( mn_phys%tsfco ) + deallocate( mn_phys%tsfcl ) + deallocate( mn_phys%tsfc ) + !deallocate( mn_phys%tsfc_radtime ) + + deallocate( mn_phys%albdirvis_lnd ) + deallocate( mn_phys%albdirnir_lnd ) + deallocate( mn_phys%albdifvis_lnd ) + deallocate( mn_phys%albdifnir_lnd ) + + deallocate( mn_phys%cv ) + deallocate( mn_phys%cvt ) + deallocate( mn_phys%cvb ) + + deallocate( mn_phys%phy_f2d ) + deallocate( mn_phys%phy_f3d ) + end if + + ! if move_nsst + if (allocated( mn_phys%tref )) then + deallocate( mn_phys%tref ) + deallocate( mn_phys%z_c ) + deallocate( mn_phys%c_0 ) + deallocate( mn_phys%c_d ) + deallocate( mn_phys%w_0 ) + deallocate( mn_phys%w_d ) + deallocate( mn_phys%xt ) + deallocate( mn_phys%xs ) + deallocate( mn_phys%xu ) + deallocate( mn_phys%xv ) + deallocate( mn_phys%xz ) + deallocate( mn_phys%zm ) + deallocate( mn_phys%xtts ) + deallocate( mn_phys%xzts ) + deallocate( mn_phys%d_conv ) + !deallocate( mn_phys%ifd ) + deallocate( mn_phys%dt_cool ) + deallocate( mn_phys%qrain ) + end if + + end subroutine deallocate_fv_moving_nest_physics_type + +end module fv_moving_nest_types_mod diff --git a/moving_nest/fv_moving_nest_utils.F90 b/moving_nest/fv_moving_nest_utils.F90 new file mode 100644 index 000000000..609a327ce --- /dev/null +++ b/moving_nest/fv_moving_nest_utils.F90 @@ -0,0 +1,2159 @@ +!*********************************************************************** +!* GNU General Public License * +!* This file is a part of fvGFS. * +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!*********************************************************************** +!> @file +!! 
@brief Provides subroutines to enable moving nest functionality in FV3 dynamic core. +!! @author W. Ramstrom, AOML/HRD 01/15/2021 +!! @email William.Ramstrom@noaa.gov +! =======================================================================! + +module fv_moving_nest_utils_mod + + use mpp_mod, only: FATAL, WARNING, MPP_DEBUG, NOTE, MPP_CLOCK_SYNC,MPP_CLOCK_DETAILED + use mpp_mod, only: mpp_pe, mpp_npes, mpp_root_pe, mpp_error, mpp_set_warn_level + use mpp_mod, only: mpp_declare_pelist, mpp_set_current_pelist, mpp_sync, mpp_sync_self + use mpp_mod, only: input_nml_file + use mpp_mod, only: mpp_get_current_pelist, mpp_broadcast + use mpp_domains_mod, only: GLOBAL_DATA_DOMAIN, BITWISE_EXACT_SUM, BGRID_NE, CGRID_NE, DGRID_NE, AGRID + use mpp_parameter_mod, only: AGRID_PARAM=>AGRID,CGRID_NE_PARAM=>CGRID_NE,SCALAR_PAIR + use mpp_domains_mod, only: FOLD_SOUTH_EDGE, FOLD_NORTH_EDGE, FOLD_WEST_EDGE, FOLD_EAST_EDGE + use mpp_domains_mod, only: MPP_DOMAIN_TIME, CYCLIC_GLOBAL_DOMAIN, NUPDATE,EUPDATE, XUPDATE, YUPDATE, SCALAR_PAIR + use mpp_domains_mod, only: domain1D, domain2D, DomainCommunicator2D, BITWISE_EFP_SUM + use mpp_domains_mod, only: mpp_get_compute_domain, mpp_get_data_domain, mpp_domains_set_stack_size + use mpp_domains_mod, only: mpp_global_field, mpp_global_sum, mpp_global_max, mpp_global_min + use mpp_domains_mod, only: mpp_domains_init, mpp_domains_exit, mpp_broadcast_domain + use mpp_domains_mod, only: mpp_update_domains, mpp_check_field, mpp_redistribute, mpp_get_memory_domain + use mpp_domains_mod, only: mpp_define_layout, mpp_define_domains, mpp_modify_domain + use mpp_domains_mod, only: mpp_define_io_domain + use mpp_domains_mod, only: mpp_get_neighbor_pe, mpp_define_mosaic, mpp_nullify_domain_list + use mpp_domains_mod, only: NORTH, NORTH_EAST, EAST, SOUTH_EAST, CORNER, CENTER + use mpp_domains_mod, only: SOUTH, SOUTH_WEST, WEST, NORTH_WEST, mpp_define_mosaic_pelist + use mpp_domains_mod, only: mpp_get_global_domain, ZERO, NINETY, MINUS_NINETY + use mpp_domains_mod, only: mpp_get_boundary, mpp_start_update_domains, mpp_complete_update_domains + use mpp_domains_mod, only: mpp_define_nest_domains, nest_domain_type + use mpp_domains_mod, only: mpp_get_C2F_index, mpp_update_nest_fine + use mpp_domains_mod, only: mpp_get_F2C_index, mpp_update_nest_coarse + use mpp_domains_mod, only: mpp_get_domain_shift, EDGEUPDATE, mpp_deallocate_domain + use mpp_domains_mod, only: mpp_group_update_type, mpp_create_group_update + use mpp_domains_mod, only: mpp_do_group_update, mpp_clear_group_update + use mpp_domains_mod, only: mpp_start_group_update, mpp_complete_group_update + use mpp_domains_mod, only: WUPDATE, SUPDATE, mpp_get_compute_domains, NONSYMEDGEUPDATE + use mpp_domains_mod, only: domainUG, mpp_define_unstruct_domain, mpp_get_UG_domain_tile_id + use mpp_domains_mod, only: mpp_get_UG_compute_domain, mpp_pass_SG_to_UG, mpp_pass_UG_to_SG + use mpp_domains_mod, only: mpp_get_ug_global_domain, mpp_global_field_ug + use mpp_memutils_mod, only: mpp_memuse_begin, mpp_memuse_end + +#ifdef GFS_TYPES + use GFS_typedefs, only: kind_phys +#else + use IPD_typedefs, only: kind_phys => IPD_kind_phys +#endif + + use constants_mod, only: grav + + use boundary_mod, only: update_coarse_grid, update_coarse_grid_mpp + use bounding_box_mod, only: bbox, bbox_get_C2F_index, fill_bbox + use fms2_io_mod, only: read_data, write_data, open_file, close_file, register_axis, register_field + use fms2_io_mod, only: FmsNetcdfDomainFile_t, FmsNetcdfFile_t, is_dimension_registered + + use fv_arrays_mod, only: R_GRID + use 
fv_arrays_mod, only: fv_grid_type, fv_nest_type, fv_atmos_type + use fv_surf_map_mod, only: FV3_zs_filter + use fv_moving_nest_types_mod, only: grid_geometry + use ifport, only: getcwd + + implicit none + +#ifdef NO_QUAD_PRECISION + ! 64-bit precision (kind=8) + integer, parameter:: f_p = selected_real_kind(15) +#else + ! Higher precision (kind=16) for grid geometrical factors: + integer, parameter:: f_p = selected_real_kind(20) +#endif + + integer, parameter :: UWIND = 1 + integer, parameter :: VWIND = 2 + + logical :: debug_log = .false. + + +#include + + + interface alloc_read_data +#ifdef OVERLOAD_R8 + module procedure alloc_read_data_r4_2d +#endif + module procedure alloc_read_data_r8_2d + end interface alloc_read_data + + interface fill_nest_halos_from_parent + module procedure fill_nest_halos_from_parent_r4_2d + module procedure fill_nest_halos_from_parent_r4_3d + module procedure fill_nest_halos_from_parent_r4_4d + + module procedure fill_nest_halos_from_parent_r8_2d + module procedure fill_nest_halos_from_parent_r8_3d + module procedure fill_nest_halos_from_parent_r8_4d + end interface fill_nest_halos_from_parent + + interface alloc_halo_buffer + module procedure alloc_halo_buffer_r4_2d + module procedure alloc_halo_buffer_r4_3d + module procedure alloc_halo_buffer_r4_4d + + module procedure alloc_halo_buffer_r8_2d + module procedure alloc_halo_buffer_r8_3d + module procedure alloc_halo_buffer_r8_4d + end interface alloc_halo_buffer + + interface fill_nest_from_buffer + module procedure fill_nest_from_buffer_r4_2d + module procedure fill_nest_from_buffer_r4_3d + module procedure fill_nest_from_buffer_r4_4d + + module procedure fill_nest_from_buffer_r8_2d + module procedure fill_nest_from_buffer_r8_3d + module procedure fill_nest_from_buffer_r8_4d + end interface fill_nest_from_buffer + + interface fill_nest_from_buffer_cell_center + module procedure fill_nest_from_buffer_cell_center_r4_2d + module procedure fill_nest_from_buffer_cell_center_r4_3d + module procedure fill_nest_from_buffer_cell_center_r4_4d + + module procedure fill_nest_from_buffer_cell_center_r8_2d + module procedure fill_nest_from_buffer_cell_center_r8_3d + module procedure fill_nest_from_buffer_cell_center_r8_4d + end interface fill_nest_from_buffer_cell_center + + interface output_grid_to_nc + module procedure output_grid_to_nc_2d + module procedure output_grid_to_nc_3d + end interface output_grid_to_nc + + interface fill_grid_from_supergrid + module procedure fill_grid_from_supergrid_r4_3d + module procedure fill_grid_from_supergrid_r8_3d + module procedure fill_grid_from_supergrid_r8_4d + end interface fill_grid_from_supergrid + + +contains + + ! GEMPAK 5-point smoother + !SM5S Smooth scalar grid using a 5-point smoother + ! SM5S ( S ) = .5 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) + + ! S (i-1,j) + S (i,j-1) ) + ! GEMPAK 9-point smoother + !SM9S Smooth scalar grid using a 9-point smoother + ! SM5S ( S ) = .25 * S (i,j) + .125 * ( S (i+1,j) + S (i,j+1) + + ! S (i-1,j) + S (i,j-1) ) + ! + .0625 * ( S (i+1,j+1) + + ! S (i+1,j-1) + + ! S (i-1,j+1) + + ! S (i-1,j-1) ) + + + subroutine smooth_5_point(data_var, i, j, val) + real, allocatable, intent(in) :: data_var(:,:) + integer :: i,j + real, intent(out) :: val + + ! Stay in bounds of the array + if ( (i-1) .ge. lbound(data_var,1) .and. i .le. ubound(data_var,1) .and. (j-1) .ge. lbound(data_var,2) .and. j .le. ubound(data_var,2) ) then + val = .5 * data_var(i,j) + .125 * ( data_var(i+1,j) + data_var(i,j+1) + data_var(i-1,j) + data_var(i,j-1) ) + else + ! 
Don't smooth if at the edge. Could do partial smoothing here also, but don't expect moving nest to reach the edge. + val = data_var(i,j) + endif + + end subroutine smooth_5_point + + + subroutine smooth_9_point(data_var, i, j, val) + real, allocatable, intent(in) :: data_var(:,:) + integer :: i,j + real, intent(out) :: val + + ! Stay in bounds of the array + if ( (i-1) .ge. lbound(data_var,1) .and. i .le. ubound(data_var,1) .and. (j-1) .ge. lbound(data_var,2) .and. j .le. ubound(data_var,2) ) then + val = .25 * data_var(i,j) + .125 * ( data_var(i+1,j) + data_var(i,j+1) + data_var(i-1,j) + data_var(i,j-1) ) & + + .0625 * ( data_var(i+1,j+1) + data_var(i+1,j-1) + data_var(i-1,j+1) + data_var(i-1,j-1) ) + else + ! Don't smooth if at the edge. Could do partial smoothing here also, but don't expect moving nest to reach the edge. + val = data_var(i,j) + endif + + end subroutine smooth_9_point + + ! blend_size is 5 for static nests. We may increase it for moving nests. + ! This is only called for fine PEs. + ! Blends a few points into the nest. Calls zs filtering if enabled in namelist. + subroutine set_blended_terrain(Atm, parent_orog_grid, nest_orog_grid, refine, halo_size, blend_size, a_step) + type(fv_atmos_type), intent(inout), target :: Atm + real, allocatable, intent(in) :: parent_orog_grid(:,:) ! Coarse grid orography + real, allocatable, intent(in) :: nest_orog_grid(:,:) ! orography for the full panel of the parent, at high-resolution + integer, intent(in) :: refine, halo_size, blend_size, a_step + + integer :: i, j, ic, jc + integer :: ioffset, joffset + integer :: npx, npy, isd, ied, jsd, jed + real :: smoothed_orog, hires_orog, blend_wt, blend_orog + + real, pointer, dimension(:,:,:) :: wt + integer, pointer, dimension(:,:,:) :: ind + integer :: this_pe + + this_pe = mpp_pe() + + npx = Atm%npx + npy = Atm%npy + + isd = Atm%bd%isc - halo_size + ied = Atm%bd%iec + halo_size + jsd = Atm%bd%jsc - halo_size + jed = Atm%bd%jec + halo_size + + ioffset = Atm%neststruct%ioffset + joffset = Atm%neststruct%joffset + + wt => Atm%neststruct%wt_h + ind => Atm%neststruct%ind_h + + do j=jsd, jed + do i=isd, ied + ic = ind(i,j,1) + jc = ind(i,j,2) + + smoothed_orog = & + wt(i,j,1)*parent_orog_grid(ic, jc ) + & + wt(i,j,2)*parent_orog_grid(ic, jc+1) + & + wt(i,j,3)*parent_orog_grid(ic+1,jc+1) + & + wt(i,j,4)*parent_orog_grid(ic+1,jc ) + + hires_orog = nest_orog_grid((ioffset-1)*refine+i, (joffset-1)*refine+j) + + ! From tools/external_ic.F90 + if (blend_size .eq. 10) then + blend_wt = max(0.,min(1.,real(10 - min(i,j,npx-i,npy-j,10))/10. )) + else + blend_wt = max(0.,min(1.,real(5 - min(i,j,npx-i,npy-j,5))/5. )) + end if + + !blend_wt = max(0.,min(1.,real(blend_size - min(i,j,npx-i,npy-j,blend_size))/real(blend_size) )) + blend_orog = (1.-blend_wt)*hires_orog + blend_wt*smoothed_orog + + Atm%phis(i,j) = blend_orog * grav + + enddo + enddo + + + ! From tools/fv_surf_map.F90::surfdrv() + if ( Atm%flagstruct%full_zs_filter ) then + !if(is_master()) then + ! write(*,*) 'Applying terrain filters. zero_ocean is', zero_ocean + !endif + !call FV3_zs_filter (bd, isd, ied, jsd, jed, npx, npy, npx_global, & + ! stretch_fac, bounded_domain, domain, area, dxa, dya, dx, dy, dxc, dyc, grid, & + ! 
agrid, sin_sg, phis, oro_g) + + call FV3_zs_filter (Atm%bd, isd, ied, jsd, jed, Atm%npx, Atm%npy, Atm%neststruct%npx_global, & + Atm%flagstruct%stretch_fac, Atm%gridstruct%bounded_domain, Atm%domain, & + Atm%gridstruct%area_64, Atm%gridstruct%dxa, Atm%gridstruct%dya, & + Atm%gridstruct%dx, Atm%gridstruct%dy, & + Atm%gridstruct%dxc, Atm%gridstruct%dyc, & + Atm%gridstruct%grid_64, & + Atm%gridstruct%agrid_64, Atm%gridstruct%sin_sg, Atm%phis, parent_orog_grid) + + call mpp_update_domains(Atm%phis, Atm%domain) + endif ! end terrain filter + + end subroutine set_blended_terrain + + subroutine set_smooth_nest_terrain(Atm, fp_orog, refine, num_points, halo_size, blend_size) + type(fv_atmos_type), intent(inout) :: Atm + real, allocatable, intent(in) :: fp_orog(:,:) ! orography for the full panel of the parent, at high-resolution + integer, intent(in) :: refine, num_points, halo_size, blend_size + + integer :: i,j + integer :: ioffset, joffset + integer :: npx, npy, isd, ied, jsd, jed + integer :: smooth_i_lo, smooth_i_hi, smooth_j_lo, smooth_j_hi + real :: smoothed_orog + character(len=16) :: errstring + + npx = Atm%npx + npy = Atm%npy + + isd = Atm%bd%isc - halo_size + ied = Atm%bd%iec + halo_size + jsd = Atm%bd%jsc - halo_size + jed = Atm%bd%jec + halo_size + + ioffset = Atm%neststruct%ioffset + joffset = Atm%neststruct%joffset + + smooth_i_lo = 1 + blend_size + smooth_i_hi = npx - blend_size - halo_size + + smooth_j_lo = 1 + blend_size + smooth_j_hi = npy - blend_size - halo_size + + !Atm(n)%phis(isd:ied, jsd:jed) = mn_static%orog_grid((ioffset-1)*x_refine+isd:(ioffset-1)*x_refine+ied, (joffset-1)*y_refine+jsd:(joffset-1)*y_refine+jed) * grav + + select case(num_points) + case (5) + + do j=jsd, jed + do i=isd, ied + if (i .lt. smooth_i_lo .or. i .gt. smooth_i_hi .or. j .lt. smooth_j_lo .or. j .gt. smooth_j_hi) then + call smooth_5_point(fp_orog, (ioffset-1)*refine + i, (joffset-1)*refine + j, smoothed_orog) + Atm%phis(i,j) = smoothed_orog * grav + else + Atm%phis(i,j) = fp_orog((ioffset-1)*refine + i, (joffset-1)*refine + j) * grav + endif + enddo + enddo + + case (9) + + do j=jsd, jed + do i=isd, ied + if (i .lt. smooth_i_lo .or. i .gt. smooth_i_hi .or. j .lt. smooth_j_lo .or. j .gt. smooth_j_hi) then + call smooth_9_point(fp_orog, (ioffset-1)*refine + i, (joffset-1)*refine + j, smoothed_orog) + Atm%phis(i,j) = smoothed_orog * grav + else + Atm%phis(i,j) = fp_orog((ioffset-1)*refine + i, (joffset-1)*refine + j) * grav + endif + enddo + enddo + + case default + write (errstring, "(I0)") num_points + call mpp_error(FATAL,'Invalid terrain_smoother in set_smooth_nest_terrain '//errstring) + end select + + end subroutine set_smooth_nest_terrain + + !================================================================================================== + ! + ! Fill Nest Halos from Parent + ! 
+ !================================================================================================== + + subroutine fill_nest_halos_from_parent_r4_2d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position) + character(len=*), intent(in) :: var_name + real*4, allocatable, intent(inout) :: data_var(:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position + + real*4, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r4_2d + + + subroutine fill_nest_halos_from_parent_r8_2d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this also be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position + + + real*8, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! 
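+    !! Each directional buffer is sized from the coarse-side bounding box returned by
+    !! bbox_get_C2F_index for that edge; PEs that do not border a given edge get a 1x1
+    !! dummy buffer so that mpp_update_nest_fine can be called uniformly on all PEs.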
+ !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! + !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r8_2d + + + subroutine fill_nest_halos_from_parent_masked(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, mask_var, mask_val, default_val) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this also be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position + real*4, allocatable, intent(in) :: mask_var(:,:) + integer, intent(in) :: mask_val + real*8, intent(in) :: default_val + + real*8, dimension(:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! 
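+    !! The masked variant treats coarse buffer values at or below -1.0 as missing:
+    !! the interpolation weights are renormalized over the valid donor points, and
+    !! points with no valid donor fall back to default_val (see
+    !! fill_nest_from_buffer_cell_center_masked below).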
+ !!=========================================================== + + call fill_nest_from_buffer_masked(interp_type, data_var, nbuffer, north_fine, north_coarse, NORTH, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + call fill_nest_from_buffer_masked(interp_type, data_var, sbuffer, south_fine, south_coarse, SOUTH, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + call fill_nest_from_buffer_masked(interp_type, data_var, ebuffer, east_fine, east_coarse, EAST, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + call fill_nest_from_buffer_masked(interp_type, data_var, wbuffer, west_fine, west_coarse, WEST, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_masked + + + subroutine fill_nest_halos_from_parent_r4_3d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*4, allocatable, intent(inout) :: data_var(:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*4, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r4_3d + + + subroutine fill_nest_halos_from_parent_r8_3d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*8, dimension(:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz) + + ! Passes data from coarse grid to fine grid's halo + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r8_3d + + + subroutine fill_nest_halos_from_parent_r4_4d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*4, allocatable, intent(inout) :: data_var(:,:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*4, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: n4d, this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + n4d = ubound(data_var, 4) + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo + ! Coarse parent PEs send data from data_var + ! Fine halo PEs receive data into one or more of the halo buffers + !==================================================== + + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! 
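+    !! For 4-D fields the buffers carry ubound(data_var,4) slices in the fourth
+    !! dimension; the same horizontal weights are applied to every level and slice.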
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r4_4d + + + subroutine fill_nest_halos_from_parent_r8_4d(var_name, data_var, interp_type, wt, ind, x_refine, y_refine, is_fine_pe, nest_domain, position, nz) + character(len=*), intent(in) :: var_name + real*8, allocatable, intent(inout) :: data_var(:,:,:,:) + integer, intent(in) :: interp_type + real, allocatable, intent(in) :: wt(:,:,:) ! TODO should this be real*8? + integer, allocatable, intent(in) :: ind(:,:,:) + integer, intent(in) :: x_refine, y_refine + logical, intent(in) :: is_fine_pe + type(nest_domain_type), intent(inout) :: nest_domain + integer, intent(in) :: position, nz + + real*8, dimension(:,:,:,:), allocatable :: nbuffer, sbuffer, ebuffer, wbuffer + type(bbox) :: north_fine, north_coarse + type(bbox) :: south_fine, south_coarse + type(bbox) :: east_fine, east_coarse + type(bbox) :: west_fine, west_coarse + integer :: n4d, this_pe + integer :: nest_level = 1 ! TODO allow to vary + + this_pe = mpp_pe() + + !!=========================================================== + !! + !! Fill halo buffers + !! + !!=========================================================== + + n4d = ubound(data_var, 4) + + call alloc_halo_buffer(nbuffer, north_fine, north_coarse, nest_domain, NORTH, position, nz, n4d) + call alloc_halo_buffer(sbuffer, south_fine, south_coarse, nest_domain, SOUTH, position, nz, n4d) + call alloc_halo_buffer(ebuffer, east_fine, east_coarse, nest_domain, EAST, position, nz, n4d) + call alloc_halo_buffer(wbuffer, west_fine, west_coarse, nest_domain, WEST, position, nz, n4d) + + !==================================================== + ! Passes data from coarse grid to fine grid's halo + ! Coarse parent PEs send data from data_var + ! Fine halo PEs receive data into one or more of the halo buffers + !==================================================== + + call mpp_update_nest_fine(data_var, nest_domain, wbuffer, sbuffer, ebuffer, nbuffer, nest_level, position=position) + + if (is_fine_pe) then + + !!=========================================================== + !! + !! Apply halo data + !! 
+ !!=========================================================== + + call fill_nest_from_buffer(interp_type, data_var, nbuffer, north_fine, north_coarse, nz, NORTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, sbuffer, south_fine, south_coarse, nz, SOUTH, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, ebuffer, east_fine, east_coarse, nz, EAST, x_refine, y_refine, wt, ind) + call fill_nest_from_buffer(interp_type, data_var, wbuffer, west_fine, west_coarse, nz, WEST, x_refine, y_refine, wt, ind) + + endif + + deallocate(nbuffer) + deallocate(sbuffer) + deallocate(ebuffer) + deallocate(wbuffer) + + end subroutine fill_nest_halos_from_parent_r8_4d + + + !================================================================================================== + ! + ! Allocate halo buffers + ! + !================================================================================================== + + subroutine alloc_halo_buffer_r8_2d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position) + real*8, dimension(:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je)) + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + allocate(buffer(1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r8_2d + + + subroutine alloc_halo_buffer_r4_2d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position) + real*4, dimension(:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je)) + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + allocate(buffer(1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r4_2d + + + subroutine alloc_halo_buffer_r4_3d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz) + real*4, dimension(:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz + + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je,1:nz)) + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. 
+ allocate(buffer(1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r4_3d + + + subroutine alloc_halo_buffer_r8_3d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz) + real*8, dimension(:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je,1:nz)) + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + allocate(buffer(1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r8_3d + + + subroutine alloc_halo_buffer_r4_4d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz, n4d) + real*4, dimension(:,:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz, n4d + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je, 1:nz, 1:n4d)) + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + allocate(buffer(1,1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r4_4d + + + subroutine alloc_halo_buffer_r8_4d(buffer, bbox_fine, bbox_coarse, nest_domain, direction, position, nz, n4d) + real*8, dimension(:,:,:,:), allocatable, intent(out) :: buffer + type(bbox), intent(out) :: bbox_fine, bbox_coarse + type(nest_domain_type), intent(in) :: nest_domain + integer, intent(in) :: direction, position, nz, n4d + + call bbox_get_C2F_index(nest_domain, bbox_fine, bbox_coarse, direction, position) + + if( bbox_coarse.ie .GE. bbox_coarse.is .AND. bbox_coarse.je .GE. bbox_coarse.js ) then + allocate(buffer(bbox_coarse.is:bbox_coarse.ie, bbox_coarse.js:bbox_coarse.je, 1:nz, 1:n4d)) + else + ! The buffer must have some storage allocated, whether it's a useful buffer or just a dummy. + allocate(buffer(1,1,1,1)) + endif + + buffer = 0 + + end subroutine alloc_halo_buffer_r8_4d + + + !================================================================================================== + ! + ! Load static data from netCDF files + ! + !================================================================================================== + + ! Load the full panel nest latlons from netCDF file + ! character(*), parameter :: nc_filename = '/scratch2/NAGAPE/aoml-hafs1/William.Ramstrom/static_grids/C384_grid.tile6.nc' + ! Read in the lat/lon in degrees, convert to radians + + subroutine load_nest_latlons_from_nc(nc_filename, nxp, nyp, refine, pelist, & + fp_tile_geo, fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine) + implicit none + + character(*), intent(in) :: nc_filename + integer, intent(in) :: nxp, nyp, refine + integer, allocatable, intent(in) :: pelist(:) + type(grid_geometry), intent(out) :: fp_tile_geo + integer, intent(out) :: fp_istart_fine, fp_iend_fine, fp_jstart_fine, fp_jend_fine + + !======================================================================================== + ! + ! 
Determine which tile this PE is operating on + ! Load the lat/lon data from netCDF file + ! If fine nest, also determine the parent tile + ! load the lat/lon data from that tile + ! This code will only operate for nest motion within a single tile + ! + !======================================================================================== + + ! read lat/lon for this tile + ! lat is y from grid file + ! lon is x from grid file + + integer :: nx, ny + + integer :: nn + integer :: super_nxp, super_nyp, mid_nx, mid_ny + integer :: super_nx, super_ny + type(grid_geometry) :: temp_tile_geo + ! Full panel nest data + integer :: i, j, fi, fj + integer :: this_pe + + real(kind=kind_phys) :: pi = 4d0 * atan(1.0d0) + real(kind=kind_phys) :: deg2rad + + deg2rad = pi / 180.0d0 + + this_pe = mpp_pe() + + nx = nxp - 1 + ny = nyp - 1 + + ! Global tiles don't have a halo in lat/lon data + ! Nests have a halo in the lat/lon data + !start = 1 + !nread = 1 + + ! single fine nest + ! full panel variables + !fp_istart_fine = 12 + !fp_iend_fine = 269 + !fp_jstart_fine = 12 + !fp_jend_fine = 269 + !super_nx = 2*(fp_iend_fine - fp_istart_fine + 1) + ( ehalo + whalo ) + !super_ny = 2*(fp_jend_fine - fp_jstart_fine + 1) + ( nhalo + shalo ) + + fp_istart_fine = 1 + fp_iend_fine = nx * refine + fp_jstart_fine = 1 + fp_jend_fine = ny * refine + super_nx = 2*(fp_iend_fine - fp_istart_fine + 1) + super_ny = 2*(fp_jend_fine - fp_jstart_fine + 1) + + super_nxp = super_nx + 1 + super_nyp = super_ny + 1 + + mid_nx = (fp_iend_fine - fp_istart_fine) + mid_ny = (fp_jend_fine - fp_jstart_fine) + + call alloc_read_data(nc_filename, 'x', super_nxp, super_nyp, fp_tile_geo%lons, pelist) + call alloc_read_data(nc_filename, 'y', super_nxp, super_nyp, fp_tile_geo%lats, pelist) + call alloc_read_data(nc_filename, 'area', super_nx, super_ny, fp_tile_geo%area, pelist) + + ! double dx(nyp, nx) + !call alloc_read_data(nc_filename, 'dx', super_nx, super_nyp, fp_tile_geo%dx) + ! double dy(ny, nxp) + !call alloc_read_data(nc_filename, 'dy', super_nxp, super_ny, fp_tile_geo%dy) + ! double area(ny, nx) + !call alloc_read_data(nc_filename, 'area', super_nx, super_ny, fp_tile_geo%area) + + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! + !! Setup the lat/lons of the actual nest, read from the larger array + !! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + !super_nxp = 2*(iend_fine - istart_fine + 1) + 2 * ( ehalo + whalo ) + 1 + !super_nyp = 2*(jend_fine - jstart_fine + 1) + 2 * ( nhalo + shalo ) + 1 + !mid_nx = (iend_fine - istart_fine) + !mid_ny = (jend_fine - jstart_fine) + + ! end reading in nest + + fp_tile_geo%lats = fp_tile_geo%lats * deg2rad + fp_tile_geo%lons = fp_tile_geo%lons * deg2rad + + end subroutine load_nest_latlons_from_nc + +#ifdef OVERLOAD_R8 + subroutine alloc_read_data_r4_2d(nc_filename, var_name, x_size, y_size, data_array, pes, time) + character(len=*), intent(in) :: nc_filename, var_name + integer, intent(in) :: x_size, y_size + real*4, allocatable, intent(inout) :: data_array(:,:) + integer, allocatable, intent(in) :: pes(:) + integer, intent(in),optional :: time + + type(FmsNetcdfFile_t) :: fileobj !< Fms2_io fileobj + real*4, allocatable :: time_array(:,:,:) + integer :: this_pe + + ! Allocate data_array to match the expected data size, then read in the data + ! This subroutine consolidates the allocation and reading of data to ensure consistency of data sizing and simplify code + ! 
Could later extend this function to determine data size based on netCDF file metadata + + this_pe = mpp_pe() + + allocate(data_array(x_size, y_size)) + data_array = -9999.9 + + if (present(time)) then + allocate(time_array(x_size, y_size, 12)) ! assume monthly data; allocate 12 slots + if (open_file(fileobj, nc_filename, "read", pelist=pes, is_restart=.false.)) then + call read_data(fileobj, var_name, time_array) + call close_file(fileobj) + endif + + data_array = time_array(:,:,time) + deallocate(time_array) + else + ! Following transition documents at https://github.com/NOAA-GFDL/FMS/tree/2021.03.01/fms2_io + if (open_file(fileobj, nc_filename, "read", pelist=pes, is_restart=.false.)) then + call read_data(fileobj, var_name, data_array) + call close_file(fileobj) + endif + endif + + end subroutine alloc_read_data_r4_2d +#endif + + subroutine alloc_read_data_r8_2d(nc_filename, var_name, x_size, y_size, data_array, pes, time) + character(len=*), intent(in) :: nc_filename, var_name + integer, intent(in) :: x_size, y_size + real*8, allocatable, intent(inout) :: data_array(:,:) + integer, allocatable, intent(in) :: pes(:) + integer, intent(in),optional :: time + + real*8, allocatable :: time_array(:,:,:) + type(FmsNetcdfFile_t) :: fileobj !< Fms2_io fileobj + integer :: this_pe + + ! Allocate data_array to match the expected data size, then read in the data + ! This subroutine consolidates the allocation and reading of data to ensure consistency of data sizing and simplify code + ! Could later extend this function to determine data size based on netCDF file metadata + + this_pe = mpp_pe() + + allocate(data_array(x_size, y_size)) + data_array = -9999.9 + + ! Following transition documents at https://github.com/NOAA-GFDL/FMS/tree/2021.03.01/fms2_io + if (present(time)) then + allocate(time_array(x_size, y_size, 12)) ! assume monthly data; allocate 12 slots + if (open_file(fileobj, nc_filename, "read", pelist=pes, is_restart=.false.)) then + call read_data(fileobj, var_name, time_array) + call close_file(fileobj) + endif + + data_array = time_array(:,:,time) + deallocate(time_array) + else + if (open_file(fileobj, nc_filename, "read", pelist=pes, is_restart=.false.)) then + call read_data(fileobj, var_name, data_array) + call close_file(fileobj) + endif + endif + + end subroutine alloc_read_data_r8_2d + + + !================================================================================================== + ! + ! NetCDF Function Section + ! + !================================================================================================== + + subroutine output_grid_to_nc_3d(flag, istart, iend, jstart, jend, k, grid, file_str, var_name, time_step, dom, pos) + implicit none + + character(len=*), intent(in) :: flag + integer, intent(in) :: istart, iend, jstart, jend, k + real, dimension(:,:,:), intent(in) :: grid + character(len=*), intent(in) :: file_str, var_name + integer, intent(in) :: time_step + type(domain2d), intent(in) :: dom + integer, intent(in) :: pos + + logical :: new_file + integer :: this_pe + character(len=512) :: dirname + character(len=512) :: filename + type(FmsNetcdfDomainFile_t) :: fileobj !< Fms2_io domain decomposed fileobj + character(len=10) :: dim_names(3) !< Array of dimension names + integer :: istat + logical :: file_exists + character(len=12) :: mode + + istat = getcwd(dirname) + write (filename, "(A,A1,A,A1,A,A1,I0.3,A)") trim(dirname), "/", trim(file_str), "_", trim(var_name), "_", time_step, ".nc" + + if (pos .eq. 
CENTER) then + dim_names(1) = "xaxis_1" + dim_names(2) = "yaxis_1" + elseif (pos .eq. NORTH) then + dim_names(1) = "xaxis_2" + dim_names(2) = "yaxis_2" + elseif (pos .eq. EAST) then + dim_names(1) = "xaxis_3" + dim_names(2) = "yaxis_3" + endif + + !dim_names(3) = "zaxis_1" + write (dim_names(3),'(A,I0)') "zaxis_", k + + !inquire(FILE=filename, EXIST=file_exists) + !if (file_exists) then + ! mode = "append" + !else + ! mode = "overwrite" + !endif + + new_file = .true. + + if (new_file) then + mode = "write" + else + mode = "append" + endif + + mode = "write" + + if (open_file(fileobj, filename, mode, dom)) then + + if (new_file) then + call register_axis(fileobj, dim_names(1), "x", CENTER) ! TODO investigate handling of non-centered position + call register_axis(fileobj, dim_names(2), "y", CENTER) ! TODO investigate handling of non-centered position + call register_axis(fileobj, trim(dim_names(3)), k) + endif + + call register_field(fileobj, trim(var_name), 'float', dim_names) + call write_data(fileobj, trim(var_name), grid) + call close_file(fileobj) + endif + +! if (.not. is_dimension_registered(fileobj, dim_names(1))) then +! call register_axis(fileobj, dim_names(1), "x") ! TODO investigate handling of non-centered position +! endif +! if (.not. is_dimension_registered(fileobj, dim_names(2))) call register_axis(fileobj, dim_names(2), "y") ! TODO investigate handling of non-centered position +! if (.not. is_dimension_registered(fileobj, trim(dim_names(3)))) then +! call register_axis(fileobj, trim(dim_names(3)), k) +! endif + + end subroutine output_grid_to_nc_3d + + + subroutine output_grid_to_nc_2d(flag, istart, iend, jstart, jend, grid, file_str, var_name, time_step, dom, pos) + implicit none + + character(len=*), intent(in) :: flag + integer, intent(in) :: istart, iend, jstart, jend + real, dimension(:,:), intent(in) :: grid + character(len=*), intent(in) :: file_str, var_name + integer, intent(in) :: time_step + type(domain2d), intent(in) :: dom + integer, intent(in) :: pos + + logical :: new_file + integer :: istat + character(len=512) :: dirname + character(len=512) :: filename + type(FmsNetcdfDomainFile_t) :: fileobj !< Fms2_io domain decomposed fileobj + character(len=8) :: dim_names(2) !< Array of dimension names + character(len=12) :: mode + + istat = getcwd(dirname) + write (filename, "(A,A1,A,A1,A,A1,I0.3,A)") trim(dirname), "/", trim(file_str), "_", trim(var_name), "_", time_step, ".nc" + + if (pos .eq. CENTER) then + dim_names(1) = "xaxis_1" + dim_names(2) = "yaxis_1" + elseif (pos .eq. NORTH) then + dim_names(1) = "xaxis_2" + dim_names(2) = "yaxis_2" + elseif (pos .eq. EAST) then + dim_names(1) = "xaxis_3" + dim_names(2) = "yaxis_3" + endif + + new_file = .true. + + if (new_file) then + mode = "write" + else + mode = "append" + endif + + if (open_file(fileobj, filename, mode, dom)) then + if (new_file) then + call register_axis(fileobj, dim_names(1), "x", CENTER) ! TODO investigate handling of non-centered position + call register_axis(fileobj, dim_names(2), "y", CENTER) ! TODO investigate handling of non-centered position + endif + + call register_field(fileobj, trim(var_name), 'float', dim_names) + call write_data(fileobj, trim(var_name), grid) + call close_file(fileobj) + endif + + end subroutine output_grid_to_nc_2d + + + + !================================================================================================== + ! + ! Fill Section + ! 
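+  ! The routines in this section copy lat/lon values from the parent supergrid (which
+  ! holds corner and center points at twice the nest resolution) into nest-aligned
+  ! arrays.  A minimal sketch of the index arithmetic they share, written as a
+  ! standalone helper with hypothetical names (it is not referenced elsewhere):
+
+  pure integer function example_supergrid_index(i, nest_x, ioffset, refine, corner)
+    integer, intent(in) :: i        ! nest cell index
+    integer, intent(in) :: nest_x   ! first nest index held by this PE
+    integer, intent(in) :: ioffset  ! nest offset within the parent tile, in parent cells
+    integer, intent(in) :: refine   ! nest refinement ratio
+    logical, intent(in) :: corner   ! .true. for corner (staggered) points
+
+    ! Centers land on even supergrid indices; corners sit one point lower.
+    example_supergrid_index = (i - nest_x) * 2 + ((ioffset - 1) * refine + nest_x) * 2
+    if (corner) example_supergrid_index = example_supergrid_index - 1
+  end function example_supergrid_index
+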
+ !================================================================================================== + + subroutine fill_grid_from_supergrid_r4_3d(in_grid, stagger_type, fp_super_tile_geo, ioffset, joffset, x_refine, y_refine) + implicit none + real*4, allocatable, intent(inout) :: in_grid(:,:,:) + integer, intent(in) :: stagger_type ! CENTER, CORNER + type(grid_geometry), intent(in) :: fp_super_tile_geo + integer, intent(in) :: ioffset, joffset, x_refine, y_refine + + integer :: nest_x, nest_y, parent_x, parent_y + type(bbox) :: tile_bbox, fp_tile_bbox + integer :: i, j, fp_i, fp_j + character(len=64) :: errstring + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + ! There are a few different offsets operating here: + ! 1. ioffset,joffset is how far the start of the (centered/corner?) grid is from the start of the parent grid + ! i.e. the index of the parent center cell (not supergrid!) where the nest compute domain begins + ! 2. nest_x, nest_y are the initial indices of this tile of the nest (the patch running on the PE) + ! 2. parent_x, parent_y are the initial indices of this tile of the parent supergrid (the patch running on the PE) + ! 3. parent_x = ((ioffset -1) * x_refine + nest_x) * 2 + ! + + call fill_bbox(tile_bbox, in_grid) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! Calculate new parent alignment -- supergrid at the refine ratio + nest_x = tile_bbox%is + nest_y = tile_bbox%js + + parent_x = ((ioffset - 1) * x_refine + nest_x) * 2 + parent_y = ((joffset - 1) * y_refine + nest_y) * 2 + + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + if (stagger_type == CENTER) then + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + elseif (stagger_type == CORNER) then + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y - 1 + endif + + ! Make sure we don't run off the edge of the parent supergrid + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_i=", fp_i," is=",fp_tile_bbox%is," ie=",fp_tile_bbox%ie + call mpp_error(FATAL, "fill_grid_from_supergrid_r4_3d invalid bounds i " // errstring) + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_j=", fp_j," js=",fp_tile_bbox%js," je=",fp_tile_bbox%je + call mpp_error(FATAL, "fill_grid_from_supergrid_r4_3d invalid bounds j " // errstring) + endif + + in_grid(i,j,2) = fp_super_tile_geo%lats(fp_i, fp_j) + in_grid(i,j,1) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine fill_grid_from_supergrid_r4_3d + + + subroutine fill_grid_from_supergrid_r8_3d(in_grid, stagger_type, fp_super_tile_geo, ioffset, joffset, x_refine, y_refine) + implicit none + real*8, allocatable, intent(inout) :: in_grid(:,:,:) + integer, intent(in) :: stagger_type ! CENTER, CORNER + type(grid_geometry), intent(in) :: fp_super_tile_geo + integer, intent(in) :: ioffset, joffset, x_refine, y_refine + + integer :: nest_x, nest_y, parent_x, parent_y + type(bbox) :: tile_bbox, fp_tile_bbox + integer :: i, j, fp_i, fp_j + character(len=64) :: errstring + + ! tile_geo is cell-centered, at nest refinement + ! 
fp_super_tile_geo is a supergrid, at nest refinement + + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + ! There are a few different offsets operating here: + ! 1. ioffset,joffset is how far the start of the (centered/corner?) grid is from the start of the parent grid + ! i.e. the index of the parent center cell (not supergrid!) where the nest compute domain begins + ! 2. nest_x, nest_y are the initial indices of this tile of the nest (the patch running on the PE) + ! 2. parent_x, parent_y are the initial indices of this tile of the parent supergrid (the patch running on the PE) + ! 3. parent_x = ((ioffset -1) * x_refine + nest_x) * 2 + ! + + call fill_bbox(tile_bbox, in_grid) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! Calculate new parent alignment -- supergrid at the refine ratio + nest_x = tile_bbox%is + nest_y = tile_bbox%js + + parent_x = ((ioffset - 1) * x_refine + nest_x) * 2 + parent_y = ((joffset - 1) * y_refine + nest_y) * 2 + + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + if (stagger_type == CENTER) then + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + elseif (stagger_type == CORNER) then + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y - 1 + endif + + ! Make sure we don't run off the edge of the parent supergrid + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_i=", fp_i," is=",fp_tile_bbox%is," ie=",fp_tile_bbox%ie + call mpp_error(FATAL, "fill_grid_from_supergrid_r8_3d invalid bounds i " // errstring) + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_j=", fp_j," js=",fp_tile_bbox%js," je=",fp_tile_bbox%je + call mpp_error(FATAL, "fill_grid_from_supergrid_r8_3d invalid bounds j " // errstring) + endif + + in_grid(i,j,2) = fp_super_tile_geo%lats(fp_i, fp_j) + in_grid(i,j,1) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine fill_grid_from_supergrid_r8_3d + + + subroutine fill_grid_from_supergrid_r8_4d(in_grid, stagger_type, fp_super_tile_geo, ioffset, joffset, x_refine, y_refine) + implicit none + real*8, allocatable, intent(inout) :: in_grid(:,:,:,:) + integer, intent(in) :: stagger_type ! CENTER, CORNER + type(grid_geometry), intent(in) :: fp_super_tile_geo + integer, intent(in) :: ioffset, joffset, x_refine, y_refine + + integer :: nest_x, nest_y, parent_x, parent_y + type(bbox) :: tile_bbox, fp_tile_bbox + integer :: i, j, fp_i, fp_j + character(len=64) :: errstring + + ! tile_geo is cell-centered, at nest refinement + ! fp_super_tile_geo is a supergrid, at nest refinement + + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + ! There are a few different offsets operating here: + ! 1. ioffset,joffset is how far the start of the (centered/corner?) grid is from the start of the parent grid + ! i.e. the index of the parent center cell (not supergrid!) where the nest compute domain begins + ! 2. nest_x, nest_y are the initial indices of this tile of the nest (the patch running on the PE) + ! 2. parent_x, parent_y are the initial indices of this tile of the parent supergrid (the patch running on the PE) + ! 3. parent_x = ((ioffset -1) * x_refine + nest_x) * 2 + ! 
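+    ! Worked example of the offsets above (numbers are illustrative only): with
+    ! ioffset = 11, x_refine = 3, and a PE whose first nest index is nest_x = 7,
+    ! parent_x = ((11 - 1) * 3 + 7) * 2 = 74, so nest cell i = 7 maps to supergrid
+    ! column fp_i = (7 - 7) * 2 + 74 = 74 for centered data (73 for corner data).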
+ + call fill_bbox(tile_bbox, in_grid) + call fill_bbox(fp_tile_bbox, fp_super_tile_geo%lats) + + ! Calculate new parent alignment -- supergrid at the refine ratio + nest_x = tile_bbox%is + nest_y = tile_bbox%js + + parent_x = ((ioffset - 1) * x_refine + nest_x) * 2 + parent_y = ((joffset - 1) * y_refine + nest_y) * 2 + + do i = tile_bbox%is, tile_bbox%ie + do j = tile_bbox%js, tile_bbox%je + if (stagger_type == CENTER) then + fp_i = (i - nest_x) * 2 + parent_x + fp_j = (j - nest_y) * 2 + parent_y + elseif (stagger_type == CORNER) then + fp_i = (i - nest_x) * 2 + parent_x - 1 + fp_j = (j - nest_y) * 2 + parent_y - 1 + endif + + ! Make sure we don't run off the edge of the parent supergrid + if (fp_i < fp_tile_bbox%is .or. fp_i > fp_tile_bbox%ie) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_i=", fp_i," is=",fp_tile_bbox%is," ie=",fp_tile_bbox%ie + call mpp_error(FATAL, "fill_grid_from_supergrid_r8_4d invalid bounds i " // errstring) + endif + if (fp_j < fp_tile_bbox%js .or. fp_j > fp_tile_bbox%je) then + write(errstring, "(A,I0,A,I0,A,I0)") "fp_j=", fp_j," js=",fp_tile_bbox%js," je=",fp_tile_bbox%je + call mpp_error(FATAL, "fill_grid_from_supergrid_r8_4d invalid bounds j " // errstring) + endif + + in_grid(i,j,2,1) = fp_super_tile_geo%lats(fp_i, fp_j) + in_grid(i,j,1,1) = fp_super_tile_geo%lons(fp_i, fp_j) + enddo + enddo + + ! Validate at the end + !call find_nest_alignment(tile_geo, fp_super_tile_geo, nest_x, nest_y, parent_x, parent_y) + + end subroutine fill_grid_from_supergrid_r8_4d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_r4_2d(interp_type, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*4, allocatable, intent(inout) :: x(:,:) + real*4, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + ! case (3) ! C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + case (9) + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, dir, wt) + call mpp_error(FATAL, '2D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r4_2d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_r8_2d(interp_type, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! 
The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + ! case (3) ! C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + case (9) + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, dir, wt) + call mpp_error(FATAL, '2D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r8_2d + + + subroutine fill_nest_from_buffer_masked(interp_type, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + real, allocatable, intent(in) :: mask_var(:,:) + integer, intent(in) :: mask_val + real*8, intent(in) :: default_val + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + ! case (3) ! C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + case (7) + call fill_nest_from_buffer_cell_center_masked("A", x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + case (9) + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, dir, wt) + call mpp_error(FATAL, '2D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_masked + + + + subroutine fill_nest_from_buffer_r4_3d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*4, allocatable, intent(inout) :: x(:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) ! 
C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + call mpp_error(FATAL, 'fill_nest_from_buffer_nearest_neighbor is not yet implemented.') + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r4_3d + + + subroutine fill_nest_from_buffer_r8_3d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) ! C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + call mpp_error(FATAL, 'nearest_neighbor is not yet implemented for fv_moving_nest_utils.F90::fill_nest_from_buffer_3D_kindphys') + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r8_3d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_r4_4d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*4, allocatable, intent(inout) :: x(:,:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) ! 
C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + call mpp_error(FATAL, '4D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r4_4d + + + subroutine fill_nest_from_buffer_r8_4d(interp_type, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + + integer, intent(in) :: interp_type + real*8, allocatable, intent(inout) :: x(:,:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + integer :: this_pe + this_pe = mpp_pe() + + ! Output the interpolation type + select case (interp_type) + case (1) + call fill_nest_from_buffer_cell_center("A", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + ! case (3) ! C grid staggered + case (4) + call fill_nest_from_buffer_cell_center("D", x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + case (9) + !call fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + call mpp_error(FATAL, '4D fill_nest_from_buffer_nearest_neighbor not yet implemented.') + case default + call mpp_error(FATAL, 'interp_single_nest got invalid value for interp_type from namelist.') + end select + + end subroutine fill_nest_from_buffer_r8_4d + + + !>@brief This subroutine fills the nest halo data from the coarse grid data by downscaling. It can accommodate all grid staggers, using the stagger variable. [The routine needs to be renamed since "_from_cell_center" has become incorrect.) + !>@details Applicable to any interpolation type + + subroutine fill_nest_from_buffer_cell_center_r4_2d(stagger, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*4, allocatable, intent(inout) :: x(:,:) + real*4, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. 
bbox_coarse%js ) then + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j) = & + wt(i,j,1)*buffer(ic, jc ) + & + wt(i,j,2)*buffer(ic, jc+1) + & + wt(i,j,3)*buffer(ic+1,jc+1) + & + wt(i,j,4)*buffer(ic+1,jc ) + + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_r4_2d + + + subroutine fill_nest_from_buffer_cell_center_r8_2d(stagger, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j) = & + wt(i,j,1)*buffer(ic, jc ) + & + wt(i,j,2)*buffer(ic, jc+1) + & + wt(i,j,3)*buffer(ic+1,jc+1) + & + wt(i,j,4)*buffer(ic+1,jc ) + + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_r8_2d + + + subroutine fill_nest_from_buffer_cell_center_masked(stagger, x, buffer, bbox_fine, bbox_coarse, dir, x_refine, y_refine, wt, ind, mask_var, mask_val, default_val) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:) + real*8, allocatable, intent(in) :: buffer(:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + real, allocatable, intent(in) :: mask_var(:,:) + integer, intent(in) :: mask_val + real*8, intent(in) :: default_val + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + real :: tw + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + + ic = ind(i,j,1) + jc = ind(i,j,2) + + !x(i,j) = & + ! wt(i,j,1)*buffer(ic, jc ) + & + ! wt(i,j,2)*buffer(ic, jc+1) + & + ! wt(i,j,3)*buffer(ic+1,jc+1) + & + ! wt(i,j,4)*buffer(ic+1,jc ) + + ! Land type + !if (mask_var(i,j) .eq. mask_val) then + x(i,j) = 0.0 + tw = 0.0 + if (buffer(ic,jc) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic, jc ) + if (buffer(ic,jc+1) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic, jc+1) + if (buffer(ic+1,jc+1) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic+1,jc+1) + if (buffer(ic+1,jc) .gt. -1.0) x(i,j) = x(i,j) + wt(i,j,1)*buffer(ic+1,jc ) + + if (buffer(ic,jc) .gt. 
-1.0) tw = tw + wt(i,j,1) + if (buffer(ic,jc+1) .gt. -1.0) tw = tw + wt(i,j,1) + if (buffer(ic+1,jc+1) .gt. -1.0) tw = tw + wt(i,j,1) + if (buffer(ic+1,jc) .gt. -1.0) tw = tw + wt(i,j,1) + + if (tw .gt. 0.0) then + x(i,j) = x(i,j) / tw + else + x(i,j) = default_val + endif + + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_masked + + + subroutine fill_nest_from_buffer_cell_center_r4_3d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*4, allocatable, intent(inout) :: x(:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j,k) = & + wt(i,j,1)*buffer(ic, jc, k) + & + wt(i,j,2)*buffer(ic, jc+1,k) + & + wt(i,j,3)*buffer(ic+1,jc+1,k) + & + wt(i,j,4)*buffer(ic+1,jc, k) + + enddo + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_r4_3d + + subroutine fill_nest_from_buffer_cell_center_r8_3d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. 
bbox_coarse%js ) then + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + !if (stagger == "A") then + !else if (stagger == "C") then + !else if (stagger == "D") then + !endif + + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j,k) = & + wt(i,j,1)*buffer(ic, jc, k) + & + wt(i,j,2)*buffer(ic, jc+1,k) + & + wt(i,j,3)*buffer(ic+1,jc+1,k) + & + wt(i,j,4)*buffer(ic+1,jc, k) + enddo + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_r8_3d + + + subroutine fill_nest_from_buffer_cell_center_r4_4d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*4, allocatable, intent(inout) :: x(:,:,:,:) + real*4, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, v, ic, jc + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + do v=1,ubound(buffer,4) + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j,k,v) = & + wt(i,j,1)*buffer(ic, jc, k, v) + & + wt(i,j,2)*buffer(ic, jc+1,k, v) + & + wt(i,j,3)*buffer(ic+1,jc+1,k, v) + & + wt(i,j,4)*buffer(ic+1,jc, k, v) + enddo + enddo + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_r4_4d + + + subroutine fill_nest_from_buffer_cell_center_r8_4d(stagger, x, buffer, bbox_fine, bbox_coarse, nz, dir, x_refine, y_refine, wt, ind) + implicit none + character ( len = 1 ), intent(in) :: stagger + real*8, allocatable, intent(inout) :: x(:,:,:,:) + real*8, allocatable, intent(in) :: buffer(:,:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: nz + integer, intent(in) :: dir, x_refine, y_refine + real, allocatable, intent(in) :: wt(:,:,:) ! The final dimension is always 4 + integer, allocatable, intent(in) :: ind(:,:,:) + + character(len=8) :: dir_str + integer :: i, j, k, v, ic, jc + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + do v=1,ubound(buffer,4) + do k=1,nz + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + ic = ind(i,j,1) + jc = ind(i,j,2) + + x(i,j,k,v) = & + wt(i,j,1)*buffer(ic, jc, k, v) + & + wt(i,j,2)*buffer(ic, jc+1,k, v) + & + wt(i,j,3)*buffer(ic+1,jc+1,k, v) + & + wt(i,j,4)*buffer(ic+1,jc, k, v) + enddo + enddo + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_cell_center_r8_4d + + + subroutine fill_nest_from_buffer_nearest_neighbor(x, buffer, bbox_fine, bbox_coarse, nz, dir, wt) + implicit none + + real, allocatable, intent(inout) :: x(:,:,:) + real, allocatable, intent(in) :: buffer(:,:,:) + type(bbox), intent(in) :: bbox_fine, bbox_coarse + integer, intent(in) :: dir + real, allocatable, intent(in) :: wt(:,:,:) ! 
The final dimension is always 4 + integer, intent(in) :: nz + + character(len=8) :: dir_str + integer :: i, j, k, ic, jc + integer :: nearest_idx + + select case(dir) + case (NORTH) + dir_str = "NORTH" + case (SOUTH) + dir_str = "SOUTH" + case (EAST) + dir_str = "EAST" + case (WEST) + dir_str = "WEST" + case default + dir_str = "ERR DIR" + end select + + if( bbox_coarse%ie .GE. bbox_coarse%is .AND. bbox_coarse%je .GE. bbox_coarse%js ) then + do j=bbox_fine%js, bbox_fine%je + do i=bbox_fine%is, bbox_fine%ie + + ic = bbox_coarse%is + 1 + jc = bbox_coarse%js + 1 + + do k=1,nz + + ! Pick the maximum weight of the 4 + ! If two are tied for the max weight, use whichever one maxloc returns first + ! TODO Might need a more deterministic algorithm here for reproducibility; e.g. take the lowest index, etc. + nearest_idx = maxloc(wt(i, j, :), 1) + + select case (nearest_idx) + case (1) + x(i,j,k) = buffer(ic, jc, k) + case (2) + x(i,j,k) = buffer(ic, jc+1,k) + case (3) + x(i,j,k) = buffer(ic+1,jc+1,k) + case (4) + x(i,j,k) = buffer(ic+1,jc, k) + case default + ! Fill in with first value and warn + x(i,j,k) = buffer(ic, jc, k) + !if (debug_log) print '("[WARN] Nearest Neighbor algorithm mismatch index ",I0," buffer. npe=",I0," x(",I0,",",I0,",",I0,")=",F12.5)', nearest_idx, this_pe, i, j, k, x(i,j,k) + end select + enddo + enddo + enddo + endif + + end subroutine fill_nest_from_buffer_nearest_neighbor + + + subroutine fill_weight_grid(atm_wt, new_wt) + real, allocatable, intent(inout) :: atm_wt(:,:,:) + real, allocatable, intent(in) :: new_wt(:,:,:) + + integer :: x,y,z,n + integer :: this_pe + + this_pe = mpp_pe() + + do n=1,3 + if (lbound(atm_wt, n) .ne. lbound(new_wt, n)) then + call mpp_error(FATAL, "fill_weight_grid invalid lower bounds") + endif + if (ubound(atm_wt, n) .ne. ubound(new_wt, n)) then + call mpp_error(FATAL, "fill_weight_grid invalid upper bounds") + endif + enddo + + do x = lbound(atm_wt,1),ubound(atm_wt,1) + do y = lbound(atm_wt,2),ubound(atm_wt,2) + do z = 1,4 + atm_wt(x,y,z) = new_wt(x,y,z) + enddo + enddo + enddo + + end subroutine fill_weight_grid + +end module fv_moving_nest_utils_mod diff --git a/moving_nest/fv_tracker.F90 b/moving_nest/fv_tracker.F90 new file mode 100644 index 000000000..75c38217b --- /dev/null +++ b/moving_nest/fv_tracker.F90 @@ -0,0 +1,1905 @@ +!*********************************************************************** +!* GNU General Public License * +!* This file is a part of fvGFS. * +!* * +!* fvGFS is free software; you can redistribute it and/or modify it * +!* and are expected to follow the terms of the GNU General Public * +!* License as published by the Free Software Foundation; either * +!* version 2 of the License, or (at your option) any later version. * +!* * +!* fvGFS is distributed in the hope that it will be useful, but * +!* WITHOUT ANY WARRANTY; without even the implied warranty of * +!* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * +!* General Public License for more details. * +!* * +!* For the full text of the GNU General Public License, * +!* write to: Free Software Foundation, Inc., * +!* 675 Mass Ave, Cambridge, MA 02139, USA. * +!* or see: http://www.gnu.org/licenses/gpl.html * +!*********************************************************************** + +!>@brief The module 'fv_tracker' contains the internal GFDL/NCEP vortex tracker +!adapted from HWRF internal vortex tracker, mainly based on the GFDL vortex +!tracker. 
+ +module fv_tracker_mod + +#include <fms_platform.h> + + use constants_mod, only: pi=>pi_8, rad_to_deg, deg_to_rad, RVGAS, RDGAS + use fms_mod, only: mpp_clock_id, CLOCK_SUBCOMPONENT, clock_flag_default, & + mpp_clock_begin, mpp_clock_end + use time_manager_mod, only: time_type, get_time, set_time, operator(+), & + operator(-), operator(/), time_type_to_real, date_to_string + use mpp_mod, only: mpp_error, stdout, FATAL, WARNING, NOTE, & + mpp_root_pe, mpp_npes, mpp_pe, mpp_chksum, & + mpp_get_current_pelist, & + mpp_set_current_pelist, mpp_sync + use mpp_domains_mod, only: mpp_get_data_domain, mpp_get_compute_domain + use fv_arrays_mod, only: fv_atmos_type, R_GRID + use fv_diagnostics_mod, only: fv_diag_init, fv_diag, fv_time, prt_maxmin, prt_height + use fv_diagnostics_mod, only: interpolate_vertical, interpolate_z, get_vorticity, & + get_height_field, get_pressure_given_height, & + get_height_given_pressure, cs3_interpolator + use fv_mp_mod, only: is_master, & + mp_reduce_sum, mp_reduce_max, mp_reduce_min, & + mp_reduce_minval, mp_reduce_maxval, & + mp_reduce_minloc, mp_reduce_maxloc + + use fv_timing_mod, only: timing_on, timing_off + use fv_moving_nest_types_mod, only: Moving_nest + + implicit none + private + public :: fv_tracker_init, fv_tracker_center, fv_tracker_post_move + public :: fv_diag_tracker, allocate_tracker, deallocate_tracker + public :: check_is_moving_nest, execute_tracker + public :: Tracker + + integer, parameter :: maxtp=11 ! number of tracker parameters + + real, parameter :: invE=0.36787944117 ! 1/e + real, parameter :: searchrad_6=250.0 ! km - ignore data more than this far from domain center + real, parameter :: searchrad_7=200.0 ! km - ignore data more than this far from domain center + real, parameter :: uverrmax=225.0 ! For use in get_uv_guess + real, parameter :: ecircum=40030.2 ! Earth's circumference (km) using erad=6371.e3 + real, parameter :: rads_vmag=120.0 ! max search radius for wind minimum + real, parameter :: err_reg_init=300.0 ! max err at initial time (km) + real, parameter :: err_reg_max=225.0 ! max err at other times (km) + + real, parameter :: errpmax=485.0 ! max stddev of track parameters + real, parameter :: errpgro=1.25 ! stddev multiplier + + real, parameter :: max_wind_search_radius=searchrad_7 ! max radius for vmax search + real, parameter :: min_mlsp_search_radius=searchrad_7 ! max radius for pmin search + + real, parameter :: km2nmi=0.539957, kn2mps=0.514444, mps2kn=1./kn2mps + + + type fv_tracker_type + ! For internal vortex tracker + real, _ALLOCATABLE :: vort850(:,:) _NULL !< relative vorticity at 850 mb + real, _ALLOCATABLE :: spd850(:,:) _NULL !< wind speed at 850 mb + real, _ALLOCATABLE :: u850(:,:) _NULL !< ua at 850 mb + real, _ALLOCATABLE :: v850(:,:) _NULL !< va at 850 mb + real, _ALLOCATABLE :: z850(:,:) _NULL !< geopotential height at 850 mb + real, _ALLOCATABLE :: vort700(:,:) _NULL !< relative vorticity at 700 mb + real, _ALLOCATABLE :: spd700(:,:) _NULL !< wind speed at 700 mb + real, _ALLOCATABLE :: u700(:,:) _NULL !< ua at 700 mb + real, _ALLOCATABLE :: v700(:,:) _NULL !< va at 700 mb + real, _ALLOCATABLE :: z700(:,:) _NULL !< geopotential height at 700 mb + real, _ALLOCATABLE :: vort10m(:,:) _NULL !< relative vorticity at 10-m + real, _ALLOCATABLE :: spd10m(:,:) _NULL !< wind speed at 10-m + real, _ALLOCATABLE :: u10m(:,:) _NULL !< ua at 10-m + real, _ALLOCATABLE :: v10m(:,:) _NULL !< va at 10-m + real, _ALLOCATABLE :: slp(:,:) _NULL !< sea level pressure + + !
For inline NCEP tracker + real, _ALLOCATABLE :: distsq(:,:) _NULL !< Square of distance from nest center + real, _ALLOCATABLE :: tracker_distsq(:,:) _NULL !< Square of distance from tracker fix location + real, _ALLOCATABLE :: tracker_angle(:,:) _NULL !< Angle to storm center (East=0, North=pi/2, etc.) + real, _ALLOCATABLE :: tracker_fixes(:,:) _NULL !< Tracker fix information for debugging + + logical :: track_have_guess = .false. !< Is a first guess available? + real :: track_guess_lat !< First guess latitude + real :: track_guess_lon !< First guess longitude + real :: tracker_edge_dist !< Distance from storm center to domain edge + + real :: track_stderr_m1 = -99.9 !< Standard deviation of tracker centers one hour ago + real :: track_stderr_m2 = -99.9 !< Standard deviation of tracker centers two hours ago + real :: track_stderr_m3 = -99.9 !< Standard deviation of tracker centers three hours ago + + integer :: track_last_hour=0 !< Last completed forecast hour + + real :: tracker_fixlon = -999.0 !< Storm fix longitude according to inline NCEP tracker + real :: tracker_fixlat = -999.0 !< Storm fix latitude according to inline NCEP tracker + integer :: tracker_ifix = -99 !< Storm fix i location + integer :: tracker_jfix = -99 !< Storm fix j location + + real :: tracker_rmw = -99. !< Storm RMW according to inline NCEP tracker + real :: tracker_pmin = -99999. !< Storm min MSLP according to inline NCEP tracker + real :: tracker_vmax =-99. !< Storm max 10m wind according to inline NCEP tracker + + logical :: tracker_havefix = .false. !< True = storm fix locations are valid + logical :: tracker_gave_up = .false. !< True = inline tracker gave up on tracking the storm + end type fv_tracker_type + + type(fv_tracker_type), _ALLOCATABLE, target :: Tracker(:) + integer :: n = 2 ! TODO allow to vary for multiple nests + integer :: id_fv_tracker + +contains + + subroutine fv_tracker_init(length) + ! Initialize tracker variables in the Atm structure. + implicit none + integer, intent(in) :: length + + integer :: i + + call mpp_error(NOTE, 'fv_tracker_init') + id_fv_tracker= mpp_clock_id ('FV tracker', flags = clock_flag_default, grain=CLOCK_SUBCOMPONENT ) + + allocate(Tracker(length)) + + do i=1,length + Tracker(i)%track_stderr_m1=-99.9 + Tracker(i)%track_stderr_m2=-99.9 + Tracker(i)%track_stderr_m3=-99.9 + ! Tracker(i)%track_n_old=0 + ! Tracker(i)%track_old_lon=0 + ! Tracker(i)%track_old_lat=0 + ! Tracker(i)%track_old_ntsd=0 + + Tracker(i)%tracker_angle=0 + Tracker(i)%tracker_fixlon=-999.0 + Tracker(i)%tracker_fixlat=-999.0 + Tracker(i)%tracker_ifix=-99 + Tracker(i)%tracker_jfix=-99 + Tracker(i)%tracker_havefix=.false. + Tracker(i)%tracker_gave_up=.false. + Tracker(i)%tracker_pmin=-99999. + Tracker(i)%tracker_vmax=-99. + Tracker(i)%tracker_rmw=-99. + + Tracker(i)%track_have_guess=.false. + Tracker(i)%track_guess_lat=-999.0 + Tracker(i)%track_guess_lon=-999.0 + enddo + + end subroutine fv_tracker_init + + subroutine allocate_tracker(i, is, ie, js, je) + integer, intent(in) :: i, is, ie, js, je + ! 
Allocate internal vortex tracker arrays + + allocate ( Tracker(i)%vort850(is:ie,js:je) ) + allocate ( Tracker(i)%spd850(is:ie,js:je) ) + allocate ( Tracker(i)%u850(is:ie,js:je) ) + allocate ( Tracker(i)%v850(is:ie,js:je) ) + allocate ( Tracker(i)%z850(is:ie,js:je) ) + allocate ( Tracker(i)%vort700(is:ie,js:je) ) + allocate ( Tracker(i)%spd700(is:ie,js:je) ) + allocate ( Tracker(i)%u700(is:ie,js:je) ) + allocate ( Tracker(i)%v700(is:ie,js:je) ) + allocate ( Tracker(i)%z700(is:ie,js:je) ) + allocate ( Tracker(i)%vort10m(is:ie,js:je) ) + allocate ( Tracker(i)%spd10m(is:ie,js:je) ) + allocate ( Tracker(i)%u10m(is:ie,js:je) ) + allocate ( Tracker(i)%v10m(is:ie,js:je) ) + allocate ( Tracker(i)%slp(is:ie,js:je) ) + + allocate ( Tracker(i)%distsq(is:ie,js:je) ) + allocate ( Tracker(i)%tracker_distsq(is:ie,js:je) ) + allocate ( Tracker(i)%tracker_angle(is:ie,js:je) ) + allocate ( Tracker(i)%tracker_fixes(is:ie,js:je) ) + end subroutine allocate_tracker + + subroutine deallocate_tracker(nn) + integer, intent(in) :: nn + + integer :: i + + ! Deallocate internal vortex tracker arrays + do i=1,nn + if (allocated(Tracker(i)%vort850)) then + deallocate ( Tracker(i)%vort850 ) + deallocate ( Tracker(i)%spd850 ) + deallocate ( Tracker(i)%u850 ) + deallocate ( Tracker(i)%v850 ) + deallocate ( Tracker(i)%z850 ) + deallocate ( Tracker(i)%vort700 ) + deallocate ( Tracker(i)%spd700 ) + deallocate ( Tracker(i)%u700 ) + deallocate ( Tracker(i)%v700 ) + deallocate ( Tracker(i)%z700 ) + deallocate ( Tracker(i)%vort10m ) + deallocate ( Tracker(i)%spd10m ) + deallocate ( Tracker(i)%u10m ) + deallocate ( Tracker(i)%v10m ) + deallocate ( Tracker(i)%slp ) + endif + enddo + deallocate(Tracker) + + end subroutine deallocate_tracker + + subroutine check_is_moving_nest(Atm, mygrid, ngrids, is_moving_nest, moving_nest_parent) + type(fv_atmos_type), intent(inout) :: Atm(:) + integer, intent(in) :: mygrid, ngrids + logical, intent(out) :: is_moving_nest, moving_nest_parent + + integer :: nn + + ! Currently, the moving nesting configuration only supports one parent (global + ! or regional) with one moving nest. + ! This will need to be revisited when multiple and telescoping moving nests are enabled. + + ! Set is_moving_nest to true if this is a moving nest + is_moving_nest = Moving_nest(mygrid)%mn_flag%is_moving_nest + ! Set parent_of_moving_nest to true if it has a moving nest child + + do nn=2,ngrids + if ( mygrid == Atm(nn)%parent_grid%grid_number .and. & + Moving_nest(nn)%mn_flag%is_moving_nest ) then + moving_nest_parent = .true. + endif + enddo + + end subroutine check_is_moving_nest + + + subroutine execute_tracker(Atm, mygrid, Time, Time_step) + implicit none + type(fv_atmos_type), intent(inout) :: Atm(:) + integer, intent(in) :: mygrid + type(time_type), intent(in) :: Time, Time_step + + real :: zvir + type(time_type) :: Time_next, Time_step_atmos + integer :: sec, seconds, days + + zvir = real(RVGAS/RDGAS) - 1.0 + + Time_step_atmos = Time_step + Time_next = Time + Time_step_atmos + + !---- FV internal vortex tracker ----- + if ( Moving_nest(mygrid)%mn_flag%is_moving_nest ) then + if ( Moving_nest(mygrid)%mn_flag%vortex_tracker .eq. 2 .or. & + Moving_nest(mygrid)%mn_flag%vortex_tracker .eq. 6 .or. & + Moving_nest(mygrid)%mn_flag%vortex_tracker .eq. 7 ) then + + fv_time = Time_next + call get_time (fv_time, seconds, days) + call get_time (Time_step_atmos, sec) + + if (mod(seconds,Moving_nest(mygrid)%mn_flag%ntrack*sec) .eq. 
0) then + call mpp_clock_begin(id_fv_tracker) + call timing_on('FV_TRACKER') + call fv_diag_tracker(Atm(mygrid:mygrid), zvir, fv_time) + call fv_tracker_center(Atm(mygrid), mygrid, fv_time) + call timing_off('FV_TRACKER') + call mpp_clock_end(id_fv_tracker) + endif + + endif + endif + + end subroutine execute_tracker + + subroutine fv_tracker_center(Atm, n, Time) + ! Top-level entry to the internal GFDL/NCEP vortex tracker. Finds the center of + ! the storm in the specified Atm and updates the Atm variables. + ! Will do nothing and return immediately if + ! tracker%tracker_gave_up=.true. + implicit none + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: n + type(time_type), intent(in) :: Time + + integer :: ids,ide,jds,jde,kds,kde + integer :: ims,ime,jms,jme,kms,kme + integer :: ips,ipe,jps,jpe,kps,kpe + + call mpp_error(NOTE, 'fv_tracker_center') + + call get_ijk_from_domain(Atm, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + call ntc_impl(Atm, Tracker(n), Time, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + end subroutine fv_tracker_center + + subroutine fv_diag_tracker(Atm, zvir, Time) + + type(fv_atmos_type), intent(inout) :: Atm(:) + type(time_type), intent(in) :: Time + real, intent(in):: zvir + + integer :: isc, iec, jsc, jec, n, ntileMe + integer :: isd, ied, jsd, jed, npz, itrac + integer :: ngc + integer :: nt = 2 ! TODO adjust to nest number for multiple nests + + real, allocatable :: a2(:,:),a3(:,:,:),a4(:,:,:), wk(:,:,:), wz(:,:,:) + real :: height(2) + real :: ptop + integer, parameter:: nplev_tracker=2 + real:: plevs(nplev_tracker), pout(nplev_tracker) + integer:: idg(nplev_tracker), id1(nplev_tracker) + + integer i,j,k, yr, mon, dd, hr, mn, days, seconds, nq, theta_d + character(len=128) :: tname + + height(1) = 5.E3 ! for computing 5-km "pressure" + height(2) = 0. ! for sea-level pressure + + pout(1) = 700 * 1.e2 + plevs(1) = log( pout(1) ) + pout(2) = 850 * 1.e2 + plevs(2) = log( pout(2) ) + + ntileMe = size(Atm(:)) + n = 1 + isc = Atm(n)%bd%isc; iec = Atm(n)%bd%iec + jsc = Atm(n)%bd%jsc; jec = Atm(n)%bd%jec + ngc = Atm(n)%ng + npz = Atm(n)%npz + ptop = Atm(n)%ak(1) + nq = size (Atm(n)%q,4) + + isd = Atm(n)%bd%isd; ied = Atm(n)%bd%ied + jsd = Atm(n)%bd%jsd; jed = Atm(n)%bd%jed + + fv_time = Time + + if (.not. allocated(a2)) allocate ( a2(isc:iec,jsc:jec) ) + if (.not. allocated(wk)) allocate ( wk(isc:iec,jsc:jec,npz) ) + if (.not. allocated(a3)) allocate ( a3(isc:iec,jsc:jec,nplev_tracker) ) + if (.not. allocated(wz)) allocate ( wz(isc:iec,jsc:jec,npz+1) ) + + ! do n = 1, ntileMe + n = 1 + call get_height_field(isc, iec, jsc, jec, ngc, npz, Atm(n)%flagstruct%hydrostatic, Atm(n)%delz, & + wz, Atm(n)%pt, Atm(n)%q, Atm(n)%peln, zvir) + + call get_pressure_given_height(isc, iec, jsc, jec, ngc, npz, wz, 1, height(2), & + Atm(n)%pt(:,:,npz), Atm(n)%peln, a2, 1.) + ! sea level pressure in Pa + Tracker(nt)%slp=a2(:,:) + call prt_maxmin('slp', Tracker(nt)%slp, isc, iec, jsc, jec, 0, 1, 1.) + + idg(:) = 1 + call get_height_given_pressure(isc, iec, jsc, jec, npz, wz, nplev_tracker, idg, plevs, Atm(n)%peln, a3) + Tracker(nt)%z700=a3(isc:iec,jsc:jec,1) + Tracker(nt)%z850=a3(isc:iec,jsc:jec,2) + call prt_maxmin('z700', Tracker(nt)%z700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('z850', Tracker(nt)%z850, isc, iec, jsc, jec, 0, 1, 1.) 
+ + call cs3_interpolator(isc,iec,jsc,jec,npz, Atm(n)%ua(isc:iec,jsc:jec,:), nplev_tracker, & + pout(1:nplev_tracker), wz, Atm(n)%pe(isc:iec,1:npz+1,jsc:jec), idg, a3, -1) + Tracker(nt)%u700=a3(isc:iec,jsc:jec,1) + Tracker(nt)%u850=a3(isc:iec,jsc:jec,2) + call prt_maxmin('u700', Tracker(nt)%u700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('u850', Tracker(nt)%u850, isc, iec, jsc, jec, 0, 1, 1.) + + call cs3_interpolator(isc,iec,jsc,jec,npz, Atm(n)%va(isc:iec,jsc:jec,:), nplev_tracker, & + pout(1:nplev_tracker), wz, Atm(n)%pe(isc:iec,1:npz+1,jsc:jec), idg, a3, -1) + Tracker(nt)%v700=a3(isc:iec,jsc:jec,1) + Tracker(nt)%v850=a3(isc:iec,jsc:jec,2) + call prt_maxmin('v700', Tracker(nt)%v700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('v850', Tracker(nt)%v850, isc, iec, jsc, jec, 0, 1, 1.) + + call interpolate_z(isc, iec, jsc, jec, npz, 10., wz, Atm(n)%ua(isc:iec,jsc:jec,:), a2) + Tracker(nt)%u10m=a2(isc:iec,jsc:jec) + call interpolate_z(isc, iec, jsc, jec, npz, 10., wz, Atm(n)%va(isc:iec,jsc:jec,:), a2) + Tracker(nt)%v10m=a2(isc:iec,jsc:jec) + call prt_maxmin('u10m', Tracker(nt)%u10m, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('v10m', Tracker(nt)%v10m, isc, iec, jsc, jec, 0, 1, 1.) + + call get_vorticity(isc, iec, jsc, jec, isd, ied, jsd, jed, npz, Atm(n)%u, Atm(n)%v, wk, & + Atm(n)%gridstruct%dx, Atm(n)%gridstruct%dy, Atm(n)%gridstruct%rarea) + call interpolate_vertical(isc, iec, jsc, jec, npz, & + 700.e2, Atm(n)%peln, wk, a2) + Tracker(nt)%vort700=a2(:,:) + call interpolate_vertical(isc, iec, jsc, jec, npz, & + 850.e2, Atm(n)%peln, wk, a2) + Tracker(nt)%vort850=a2(:,:) + call interpolate_z(isc, iec, jsc, jec, npz, 10., wz, wk, a2) + Tracker(nt)%vort10m=a2(:,:) + call prt_maxmin('vort700', Tracker(nt)%vort700, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('vort850', Tracker(nt)%vort850, isc, iec, jsc, jec, 0, 1, 1.) + call prt_maxmin('vort10m', Tracker(nt)%vort10m, isc, iec, jsc, jec, 0, 1, 1.) + + do j=jsc,jec + do i=isc,iec + Tracker(nt)%spd700(i,j)=sqrt(Tracker(nt)%u700(i,j)**2 + Tracker(nt)%v700(i,j)**2) + Tracker(nt)%spd850(i,j)=sqrt(Tracker(nt)%u850(i,j)**2 + Tracker(nt)%v850(i,j)**2) + Tracker(nt)%spd10m(i,j)=sqrt(Tracker(nt)%u10m(i,j)**2 + Tracker(nt)%v10m(i,j)**2) + enddo + enddo + ! enddo ! end ntileMe do-loop + + if (allocated(a2)) deallocate(a2) + if (allocated(wk)) deallocate(wk) + if (allocated(a3)) deallocate(a3) + if (allocated(wz)) deallocate(wz) + + end subroutine fv_diag_tracker + + subroutine ntc_impl(Atm,tracker,Time, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + ! This is the main entry point to the tracker. It is most similar + ! to the function "tracker" in the GFDL/NCEP vortex tracker. + + implicit none + type(fv_atmos_type), intent(inout) :: Atm + type(fv_tracker_type), intent(inout) :: tracker + type(time_type), intent(in) :: Time + integer, intent(in) :: ids,ide,jds,jde,kds,kde + integer, intent(in) :: ims,ime,jms,jme,kms,kme + integer, intent(in) :: ips,ipe,jps,jpe,kps,kpe + + real :: dxdymean, sumdxa, sumdya + integer :: i, j, iweights, ip + + integer :: iguess, jguess ! first guess location + real :: latguess, longuess ! same, but in lat & lon + + integer :: iuvguess, juvguess ! "second guess" location using everything except wind maxima + real :: srsq + integer :: ifinal, jfinal + real :: latfinal, lonfinal + integer :: ierr + integer :: icen(maxtp), jcen(maxtp) ! center locations for each parameter + real :: loncen(maxtp), latcen(maxtp) ! lat, lon locations in degrees + logical :: calcparm(maxtp) ! 
do we have a valid center location for this parameter? + real :: max_wind, min_pres ! for ATCF output + real :: rcen(maxtp) ! center value (max wind, min mslp, etc.) + character*255 :: message + logical :: north_hemi ! true = northern hemisphere + logical :: have_guess ! first guess is available + real :: guessdist, guessdeg ! first guess distance to nearest point on grid + real :: latnear, lonnear ! nearest point in grid to first guess + + ! icen,jcen: Same meaning as clon, clat in tracker, but uses i and + ! j indexes of the center instead of lat/lon. Tracker comment: + ! Holds the coordinates for the center positions for + ! all storms at all times for all parameters. + ! (max_#_storms, max_fcst_times, max_#_parms). + ! For the third position (max_#_parms), here they are: + ! 1: Relative vorticity at 850 mb + ! 2: Relative vorticity at 700 mb + ! 3: Vector wind magnitude at 850 mb + ! 4: NOT CURRENTLY USED + ! 5: Vector wind magnitude at 700 mb + ! 6: NOT CURRENTLY USED + ! 7: Geopotential height at 850 mb + ! 8: Geopotential height at 700 mb + ! 9: Mean Sea Level Pressure + ! 10: Vector wind magnitude at 10 m + ! 11: Relative vorticity at 10 m + + call mpp_error(NOTE, 'ntc_impl') + + ! Initialize center information to invalid values for all centers: + icen=-99 + jcen=-99 + latcen=9e9 + loncen=9e9 + rcen=9e9 + calcparm=.false. + if(Moving_nest(2)%mn_flag%vortex_tracker==6) then ! TODO pick correct Moving_nest structure + srsq=searchrad_6*searchrad_6*1e6 + else + srsq=searchrad_7*searchrad_7*1e6 + endif + + ! Estimate the domain wide mean grid spacing in km + sumdxa=0.0 + sumdya=0.0 + do j=jps,min(jde-1,jpe) + do i=ips,min(ide-1,ipe) + sumdxa=sumdxa+Atm%gridstruct%dxa(i,j) + sumdya=sumdya+Atm%gridstruct%dya(i,j) + enddo + enddo + + call mp_reduce_sum(sumdxa) + call mp_reduce_sum(sumdya) + dxdymean=0.5*(sumdxa + sumdya)/((ide-ids) * (jde-jds)) / 1000.0 + + ! Get the square of the approximate distance to the domain center + ! at all points: + call get_distsq(Atm, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + ! Get the first guess from the prior nest motion timestep: + have_guess=tracker%track_have_guess + if(have_guess) then + ! We have a first guess center. We have to translate it to gridpoint space. + longuess=tracker%track_guess_lon + latguess=tracker%track_guess_lat + call get_nearest_lonlat(Atm,iguess,jguess,ierr,longuess,latguess, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe, & + lonnear, latnear) + if(ierr==0) then + call calcdist(longuess,latguess, lonnear,latnear, guessdist,guessdeg) + if(guessdist > Atm%neststruct%refinement*dxdymean) then +108 format('WARNING: guess lon=',F0.3,',lat=',F0.3, & + ' too far (',F0.3,'km) from nearest point lon=',F0.3,',lat=',F0.3, & + '. Will use domain center as first guess.') + write(message,108) tracker%track_guess_lon,tracker%track_guess_lat, & + guessdist,lonnear,latnear + call mpp_error(NOTE, message) + have_guess=.false. ! indicate that the first guess is unusable + else + latguess=latnear + longuess=lonnear + endif + else + have_guess=.false. ! indicate that the first guess is unusable. +109 format('WARNING: guess lon=',F0.3,',lat=',F0.3, & + ' does not exist in this domain. Will use domain center as first guess.') + write(message,109) tracker%track_guess_lon,tracker%track_guess_lat + call mpp_error(NOTE, message) + endif + endif + + ! If we could not get the first guess from the prior nest motion + ! 
timestep, then use the default first guess: the domain center. + if(Moving_nest(2)%mn_flag%vortex_tracker==6 .or. .not.have_guess) then + ! vt=6: hard coded first-guess center is domain center: + ! vt=7: first guess comes from prior timestep + ! Initial first guess is domain center. + ! Backup first guess is domain center if first guess is unusable. + iguess=(ide-ids)/2+ids + jguess=(jde-jds)/2+jds + if(Moving_nest(2)%mn_flag%vortex_tracker==7) then + call mpp_error(NOTE, 'Using domain center as first guess since no valid first guess is available.') + endif + call get_lonlat(Atm,iguess,jguess,longuess,latguess,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + if(ierr/=0) then + call mpp_error(FATAL, "ERROR: center of domain is not inside the domain") + endif + have_guess=.true. + endif + + if(.not.have_guess) then + call mpp_error(FATAL, "INTERNAL ERROR: No first guess is available (should never happen).") + endif + + north_hemi = latguess>0.0 + + ! Find the centers of all fields except the wind minima: + call find_center(Atm,tracker%vort850,srsq, & + icen(1),jcen(1),rcen(1),calcparm(1),loncen(1),latcen(1),dxdymean,'zeta', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, north_hemi=north_hemi) + call find_center(Atm,tracker%vort700,srsq, & + icen(2),jcen(2),rcen(2),calcparm(2),loncen(2),latcen(2),dxdymean,'zeta', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, north_hemi=north_hemi) + call find_center(Atm,tracker%z850,srsq, & + icen(7),jcen(7),rcen(7),calcparm(7),loncen(7),latcen(7),dxdymean,'hgt', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + call find_center(Atm,tracker%z700,srsq, & + icen(8),jcen(8),rcen(8),calcparm(8),loncen(8),latcen(8),dxdymean,'hgt', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + call find_center(Atm,tracker%slp,srsq, & + icen(9),jcen(9),rcen(9),calcparm(9),loncen(9),latcen(9),dxdymean,'slp', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + call find_center(Atm,tracker%vort10m,srsq, & + icen(11),jcen(11),rcen(11),calcparm(11),loncen(11),latcen(11),dxdymean,'zeta', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, north_hemi=north_hemi) + + ! Get a guess center location for the wind minimum searches: + call get_uv_guess(Atm,icen,jcen,loncen,latcen,calcparm, & + iguess,jguess,longuess,latguess,iuvguess,juvguess, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + ! Find wind minima. 
Requires a first guess center: + windmin: if(Moving_nest(2)%mn_flag%vortex_tracker==6) then + call find_center(Atm,tracker%spd850,srsq, & + icen(3),jcen(3),rcen(3),calcparm(3),loncen(3),latcen(3),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call find_center(Atm,tracker%spd700,srsq, & + icen(5),jcen(5),rcen(5),calcparm(5),loncen(5),latcen(5),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call find_center(Atm,tracker%spd10m,srsq, & + icen(10),jcen(10),rcen(10),calcparm(10),loncen(10),latcen(10),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + else + call get_uv_center(Atm,tracker%spd850, & + icen(3),jcen(3),rcen(3),calcparm(3),loncen(3),latcen(3),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call get_uv_center(Atm,tracker%spd700, & + icen(5),jcen(5),rcen(5),calcparm(5),loncen(5),latcen(5),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + call get_uv_center(Atm,tracker%spd10m, & + icen(10),jcen(10),rcen(10),calcparm(10),loncen(10),latcen(10),dxdymean,'wind', & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe, & + iuvguess=iuvguess, juvguess=juvguess) + endif windmin + + ! Get a final guess center location: + call fixcenter(Atm,icen,jcen,calcparm,loncen,latcen, & + iguess,jguess,longuess,latguess, & + ifinal,jfinal,lonfinal,latfinal, & + north_hemi, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + + tracker%tracker_fixes=0 + do ip=1,maxtp + if(calcparm(ip)) then + if(icen(ip)>=ips .and. icen(ip)<=ipe & + .and. jcen(ip)>=jps .and. jcen(ip)<=jpe) then + tracker%tracker_fixes(icen(ip),jcen(ip))=ip + endif + endif + enddo + + if(iguess>=ips .and. iguess<=ipe .and. jguess>=jps .and. jguess<=jpe) then + tracker%tracker_fixes(iguess,jguess)=-1 + endif + + if(iuvguess>=ips .and. iuvguess<=ipe .and. juvguess>=jps .and. juvguess<=jpe) then + tracker%tracker_fixes(iuvguess,juvguess)=-2 + endif + + if(ifinal>=ips .and. ifinal<=ipe .and. jfinal>=jps .and. 
jfinal<=jpe) then + tracker%tracker_fixes(ifinal,jfinal)=-3 + endif + + call get_tracker_distsq(Atm, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + call get_wind_pres_intensity(Atm, & + tracker%tracker_pmin,tracker%tracker_vmax,tracker%tracker_rmw, & + max_wind_search_radius, min_mlsp_search_radius, & + lonfinal,latfinal, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + +205 format('tracker fixlon=',F8.3, ' fixlat=',F8.3, & + ' ifix=',I6,' jfix=',I6, & + ' pmin=',F12.3,' vmax=',F8.3,' rmw=',F8.3) + write(message,205) tracker%tracker_fixlon, tracker%tracker_fixlat, & + tracker%tracker_ifix, tracker%tracker_jfix, & + tracker%tracker_pmin, tracker%tracker_vmax, tracker%tracker_rmw + call mpp_error(NOTE, message) + + if(is_master()) then + call output_partial_atcfunix(Atm,Time, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + endif + + end subroutine ntc_impl + + subroutine get_ijk_from_domain(Atm, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + implicit none + type(fv_atmos_type), intent(in) :: Atm + + integer, intent(out) :: ids,ide,jds,jde,kds,kde + integer, intent(out) :: ims,ime,jms,jme,kms,kme + integer, intent(out) :: ips,ipe,jps,jpe,kps,kpe + + ids = 1 + ide = Atm%npx + jds = 1 + jde = Atm%npy + kds = 1 + kde = Atm%npz + call mpp_get_data_domain(Atm%domain, ims, ime, jms, jme) + kms = 1 + kme = Atm%npz + call mpp_get_compute_domain(Atm%domain, ips, ipe, jps, jpe) + kps = 1 + kpe = Atm%npz + end subroutine get_ijk_from_domain + + subroutine get_nearest_lonlat(Atm,iloc,jloc,ierr,lon,lat, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe, & + lonnear, latnear) + ! Finds the nearest point in the domain to the specified lon,lat + ! location. + implicit none + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: ids,ide,jds,jde,kds,kde + integer, intent(in) :: ims,ime,jms,jme,kms,kme + integer, intent(in) :: ips,ipe,jps,jpe,kps,kpe + integer, intent(out) :: iloc,jloc,ierr + real, intent(in) :: lon,lat + real :: dx,dy,d,dmin, zdummy, latmin,lonmin + integer :: i,j,imin,jmin + real, intent(out), optional :: latnear, lonnear + + zdummy=42 + dmin=9e9 + imin=-99 + jmin=-99 + latmin=9e9 + lonmin=9e9 + ierr=0 + do j=jps,min(jde-1,jpe) + do i=ips,min(ide-1,ipe) + dy=abs(lat-Atm%gridstruct%agrid(i,j,2)*rad_to_deg) + dx=abs(mod(3600.+180.+(lon-Atm%gridstruct%agrid(i,j,1)*rad_to_deg),360.)-180.) + d=dx*dx+dy*dy + if(dlocalextreme) then + localextreme=windsq + locali=i + localj=j + endif + endif + enddo + enddo + if(localextreme>0) localextreme=sqrt(localextreme) + + globalextreme=localextreme + globali=locali + globalj=localj + call mp_reduce_maxval(globalextreme,globali,globalj) + + call get_lonlat(Atm,globali,globalj,globallon,globallat,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + its,ite, jts,jte, kts,kte) + if(ierr/=0) then + call mpp_error(WARNING, "WARNING: Unable to find location of wind maximum.") + rmw=-99 + else + call calcdist(clon,clat,globallon,globallat,rmw,degrees) + end if + + ! Get the guess location for the next time: + max_wind=globalextreme + if(globali<0 .or. 
globalj<0) then + call mpp_error(WARNING, "WARNING: No wind values found that were greater than -9*10^9.") + min_mslp=-999 + endif + + end subroutine get_wind_pres_intensity + + subroutine fixcenter(Atm,icen,jcen,calcparm,loncen,latcen, & + iguess,jguess,longuess,latguess, & + ifinal,jfinal,lonfinal,latfinal, & + north_hemi, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + ! This is the same as "fixcenter" in gettrk_main. Original comment: + ! + ! ABSTRACT: This subroutine loops through the different parameters + ! for the input storm number (ist) and calculates the + ! center position of the storm by taking an average of + ! the center positions obtained for those parameters. + ! First we check to see which parameters are within a + ! max error range (errmax), and we discard those that are + ! not within that range. Of the remaining parms, we get + ! a mean position, and then we re-calculate the position + ! by giving more weight to those estimates that are closer + ! to this mean first-guess position estimate. + + ! Arguments: Input: + ! grid - the grid being processed + ! icen,jcen - arrays of center gridpoint locations + ! calcperm - array of center validity flags (true = center is valid) + ! loncen,latcen - center geographic locations + ! iguess,jguess - first guess gridpoint location + ! longuess,latguess - first guess geographic location + + ! Arguments: Output: + ! ifinal,jfinal - final center gridpoint location + ! lonfinal,latfinal - final center geographic location + + ! Arguments: Optional input: + ! north_hemi - true = northern hemisphere, false=south + + implicit none + integer, intent(in) :: & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: icen(maxtp), jcen(maxtp) + real, intent(in) :: loncen(maxtp), latcen(maxtp) + logical, intent(inout) :: calcparm(maxtp) + + integer, intent(in) :: iguess,jguess + real, intent(in) :: latguess,longuess + + integer, intent(inout) :: ifinal,jfinal + real, intent(inout) :: lonfinal,latfinal + + logical, intent(in), optional :: north_hemi + + character*255 :: message + real :: errdist(maxtp),avgerr,errmax,errinit,xavg_stderr + real :: dist,degrees, total + real :: minutes,hours,trkerr_avg,dist_from_mean(maxtp),wsum + integer :: ip,itot4next,iclose,count,ifound,ierr + integer(kind=8) :: isum,jsum + real :: irsum,jrsum,errtmp,devia,wtpos + real :: xmn_dist_from_mean, stderr_close + logical use4next(maxtp) + + ! Determine forecast hour: + hours=time_type_to_real(Atm%Time-Atm%Time_Init)/3600. + + ! Decide maximum values for distance and std. dev.: + if(hours<0.5) then + errmax=err_reg_init + errinit=err_reg_init + else + errmax=err_reg_max + errinit=err_reg_max + endif + + if(hours>4.) then + xavg_stderr = ( Tracker(n)%track_stderr_m1 + & + Tracker(n)%track_stderr_m2 + Tracker(n)%track_stderr_m3 ) / 3.0 + elseif(hours>3.) then + xavg_stderr = ( Tracker(n)%track_stderr_m1 + Tracker(n)%track_stderr_m2 ) / 2.0 + elseif(hours>2.) then + xavg_stderr = Tracker(n)%track_stderr_m1 + endif + + if(hours>2.) then + errtmp = 3.0*xavg_stderr*errpgro + errmax = max(errtmp,errinit) + errtmp = errpmax + errmax = min(errmax,errtmp) + endif + + ! Initialize loop variables: + errdist=0.0 + use4next=.false. + trkerr_avg=0 + itot4next=0 + iclose=0 + isum=0 + jsum=0 + ifound=0 + + do ip=1,maxtp + if(ip==4 .or. ip==6) then + calcparm(ip)=.false. 
+ cycle + elseif(calcparm(ip)) then + ifound=ifound+1 + call calcdist(longuess,latguess,loncen(ip),latcen(ip),dist,degrees) + errdist(ip)=dist + if(dist<=errpmax) then + if(ip==3 .or. ip==5 .or. ip==10) then + use4next(ip)=.false. + else + use4next(ip)=.true. + trkerr_avg=trkerr_avg+dist + itot4next=itot4next+1 + endif + endif + if(dist<=errmax) then + iclose=iclose+1 + isum=isum+icen(ip) + jsum=jsum+jcen(ip) + else + calcparm(ip)=.false. + endif + endif + enddo + + if(ifound<=0) then + call mpp_error(NOTE, 'The tracker could not find the centers for any parameters. & + Thus, a center position could not be obtained for this storm.') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + if(iclose<=0) then +200 format('No storms are within errmax=',F0.1,'km of the parameters') + write(message,200) errmax + call mpp_error(NOTE, message) + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + ifinal=real(isum)/real(iclose) + jfinal=real(jsum)/real(iclose) + +504 format(' calculated ifinal, jfinal: ifinal=',I0,' jfinal=',I0,' isum=',I0,' jsum=',I0,' iclose=',I0) + !write(0,504) ifinal,jfinal,isum,jsum,iclose + + call get_lonlat(Atm,ifinal,jfinal,lonfinal,latfinal,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + if(ierr/=0) then + call mpp_error(NOTE, 'Gave up on finding the storm location due to error in get_lonlat (1).') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + count=0 + dist_from_mean=0.0 + total=0.0 + do ip=1,maxtp + if(calcparm(ip)) then + call calcdist(lonfinal,latfinal,loncen(ip),latcen(ip),dist,degrees) + dist_from_mean(ip)=dist + total=total+dist + count=count+1 + endif + enddo + xmn_dist_from_mean=total/real(count) + + do ip=1,maxtp + if(calcparm(ip)) then + total=total+(xmn_dist_from_mean-dist_from_mean(ip))**2 + endif + enddo + if(count<2) then + stderr_close=0.0 + else + stderr_close=max(1.0,sqrt(1./(count-1) * total)) + endif + + if(calcparm(1) .or. calcparm(2) .or. calcparm(7) .or. & + calcparm(8) .or. calcparm(9) .or. calcparm(11)) then + continue + else + ! Message copied straight from tracker: + call mpp_error(NOTE, 'In fixcenter, STOPPING PROCESSING for this storm. The reason is that') + call mpp_error(NOTE, 'none of the fix locations for parms z850, z700, zeta 850, zeta 700') + call mpp_error(NOTE, 'MSLP or sfc zeta were within a reasonable distance of the guess location.') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + ! Recalculate the final center location using weights + if(stderr_close<5.0) then + ! 
Old code forced a minimum of 5.0 stddev + stderr_close=5.0 + endif + irsum=0 + jrsum=0 + wsum=0 + do ip=1,maxtp + if(calcparm(ip)) then + devia=max(1.0,dist_from_mean(ip)/stderr_close) + wtpos=exp(-devia/3.) + irsum=icen(ip)*wtpos+irsum + jrsum=jcen(ip)*wtpos+jrsum + wsum=wtpos+wsum +1100 format(' Adding parm: devia=',F0.3,' wtpos=',F0.3,' irsum=',F0.3,' jrsum=',F0.3,' wsum=',F0.3) + !write(0,1100) devia,wtpos,irsum,jrsum,wsum + endif + enddo + ifinal=nint(real(irsum)/real(wsum)) + jfinal=nint(real(jrsum)/real(wsum)) + call get_lonlat(Atm,ifinal,jfinal,lonfinal,latfinal,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + if(ierr/=0) then + call mpp_error(NOTE, 'Gave up on finding the storm location due to error in get_lonlat (2).') + ! Use domain center as storm location + Tracker(n)%tracker_ifix=(ide-ids)/2+ids + Tracker(n)%tracker_jfix=(jde-jds)/2+jds + Tracker(n)%tracker_havefix=.false. + Tracker(n)%tracker_gave_up=.true. + Tracker(n)%tracker_fixlon=-999.0 + Tracker(n)%tracker_fixlat=-999.0 + return + endif + + ! Store the lat/lon location: + Tracker(n)%tracker_fixlon=lonfinal + Tracker(n)%tracker_fixlat=latfinal + Tracker(n)%tracker_ifix=ifinal + Tracker(n)%tracker_jfix=jfinal + Tracker(n)%tracker_havefix=.true. + + if(nint(hours) > Tracker(n)%track_last_hour ) then + ! It is time to recalculate the std. dev. of the track: + count=0 + dist_from_mean=0.0 + total=0.0 + do ip=1,maxtp + if(calcparm(ip)) then + call calcdist(lonfinal,latfinal,loncen(ip),latcen(ip),dist,degrees) + dist_from_mean(ip)=dist + total=total+dist + count=count+1 + endif + enddo + xmn_dist_from_mean=total/real(count) + + do ip=1,maxtp + if(calcparm(ip)) then + total=total+(xmn_dist_from_mean-dist_from_mean(ip))**2 + endif + enddo + if(count<2) then + stderr_close=0.0 + else + stderr_close=max(1.0,sqrt(1./(count-1) * total)) + endif + + Tracker(n)%track_stderr_m3=Tracker(n)%track_stderr_m2 + Tracker(n)%track_stderr_m2=Tracker(n)%track_stderr_m1 + Tracker(n)%track_stderr_m1=stderr_close + Tracker(n)%track_last_hour=nint(hours) + endif + + return + + end subroutine fixcenter + + subroutine get_uv_guess(Atm,icen,jcen,loncen,latcen,calcparm, & + iguess,jguess,longuess,latguess,iout,jout, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + its,ite,jts,jte,kts,kte) + ! This is a rewrite of the gettrk_main.f get_uv_guess. Original comment: + ! ABSTRACT: The purpose of this subroutine is to get a modified + ! first guess lat/lon position before searching for the + ! minimum in the wind field. The reason for doing this is + ! to better refine the guess and avoid picking up a wind + ! minimum far away from the center. So, use the + ! first guess position (and give it strong weighting), and + ! then also use the fix positions for the current time + ! (give the vorticity centers stronger weighting as well), + ! and then take the average of these positions. + + ! Arguments: Input: + ! grid - grid being searched + ! icen,jcen - tracker parameter center gridpoints + ! loncen,latcen - tracker parameter centers' geographic locations + ! calcparm - is each center valid? + ! iguess, jguess - first guess gridpoint location + ! longuess,latguess - first guess geographic location + + ! Arguments: Output: + !
iout,jout - uv guess center location + + implicit none + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: ids,ide,jds,jde,kds,kde + integer, intent(in) :: ims,ime,jms,jme,kms,kme + integer, intent(in) :: its,ite,jts,jte,kts,kte + + integer, intent(in) :: icen(maxtp), jcen(maxtp) + real, intent(in) :: loncen(maxtp), latcen(maxtp) + logical, intent(in) :: calcparm(maxtp) + + integer, intent(in) :: iguess,jguess + real, intent(in) :: latguess,longuess + + integer, intent(inout) :: iout,jout + real :: degrees,dist + integer :: ip,ict + integer(kind=8) :: isum,jsum + + ict=2 + isum=2*iguess + jsum=2*jguess + + ! Get a guess storm center location for searching for the wind centers: + do ip=1,maxtp + if ((ip > 2 .and. ip < 7) .or. ip == 10) then + cycle ! because 3-6 are for 850 & 700 u & v and 10 is + ! for surface wind magnitude. + elseif(calcparm(ip)) then + call calcdist (longuess,latguess,loncen(ip),latcen(ip),dist,degrees) + if(distrcen .and. Tracker(n)%distsq(i,j)\c NOTE: The latitude arguments passed to the + ! B / \ subr are the actual lat vals, but in + ! \ the calculation we use 90-lat. + ! a \ . + ! \pt. NOTE: You may get strange results if you: + ! C (1) use positive values for SH lats AND + ! you try computing distances across the + ! equator, or (2) use lon values of 0 to + ! -180 for WH lons AND you try computing + ! distances across the 180E meridian. + ! + ! NOTE: In the diagram above, (a) is the angle between pt. B and + ! pt. C (with pt. x as the vertex), and (A) is the difference in + ! longitude (in degrees, absolute value) between pt. B and pt. C. + ! + ! !!! NOTE !!! -- THE PARAMETER ecircum IS DEFINED (AS OF THE + ! ORIGINAL WRITING OF THIS SYSTEM) IN KM, NOT M, SO BE AWARE THAT + ! THE DISTANCE RETURNED FROM THIS SUBROUTINE IS ALSO IN KM. + ! + implicit none + + real, intent(inout) :: degrees + real, intent(out) :: xdist + real, intent(in) :: rlonb,rlatb,rlonc,rlatc + real, parameter :: dtr = 0.0174532925199433 + real :: distlatb,distlatc,pole,difflon,cosanga,circ_fract + ! + if (rlatb < 0.0 .or. rlatc < 0.0) then + pole = -90. + else + pole = 90. + endif + ! + distlatb = (pole - rlatb) * dtr + distlatc = (pole - rlatc) * dtr + difflon = abs( (rlonb - rlonc)*dtr ) + ! + cosanga = ( cos(distlatb) * cos(distlatc) + & + sin(distlatb) * sin(distlatc) * cos(difflon)) + + ! This next check of cosanga is needed since I have had ACOS crash + ! when calculating the distance between 2 identical points (should + ! = 0), but the input for ACOS was just slightly over 1 + ! (e.g., 1.00000000007), due to (I'm guessing) rounding errors. + + if (cosanga > 1.0) then + cosanga = 1.0 + endif + + degrees = acos(cosanga) / dtr + circ_fract = degrees / 360. + xdist = circ_fract * ecircum + ! + ! NOTE: whether this subroutine returns the value of the distance + ! in km or m depends on the scale of the parameter ecircum. + ! At the original writing of this subroutine (7/97), ecircum + ! was given in km. + ! + return + end subroutine calcdist + + subroutine get_lonlat(Atm,iguess,jguess,longuess,latguess,ierr, & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe) + ! Returns the latitude (latguess) and longitude (longuess) of the + ! specified location (iguess,jguess) in the specified grid. 
+ implicit none + integer, intent(in) :: & + ids,ide, jds,jde, kds,kde, & + ims,ime, jms,jme, kms,kme, & + ips,ipe, jps,jpe, kps,kpe + integer, intent(out) :: ierr + type(fv_atmos_type), intent(inout) :: Atm + integer, intent(in) :: iguess,jguess + real, intent(inout) :: longuess,latguess + real :: weight,zjunk + integer :: itemp,jtemp + + ierr=0 + zjunk=1 + if(iguess>=ips .and. iguess<=ipe .and. jguess>=jps .and. jguess<=jpe) then + weight=1 + longuess=Atm%gridstruct%agrid(iguess,jguess,1)*rad_to_deg + latguess=Atm%gridstruct%agrid(iguess,jguess,2)*rad_to_deg + itemp=iguess + jtemp=jguess + else + weight=0 + longuess=-999.9 + latguess=-999.9 + itemp=-99 + jtemp=-99 + endif + + call mp_reduce_maxloc(weight,latguess,longuess,zjunk,itemp,jtemp) + + if(itemp==-99 .and. jtemp==-99) then + ierr=95 + endif + end subroutine get_lonlat + + subroutine clean_lon_lat(xlon1,ylat1) + real, intent(inout) :: xlon1,ylat1 + ! This modifies a (lat,lon) pair so that the longitude fits + ! between [-180,180] and the latitude between [-90,90], taking + ! into account spherical geometry. + ! NOTE: inputs and outputs are in degrees + xlon1=(mod(xlon1+3600.+180.,360.)-180.) + ylat1=(mod(ylat1+3600.+180.,360.)-180.) + if(ylat1>90.) then + ylat1=180.-ylat1 + xlon1=mod(xlon1+360.,360.)-180. + elseif(ylat1<-90.) then + ylat1=-180. - ylat1 + xlon1=mod(xlon1+360.,360.)-180. + endif + end subroutine clean_lon_lat + + !---------------------------------------------------------------------------------- + ! These two simple routines return an N, S, E or W for the + ! hemisphere of a latitude or longitude. + character(1) function get_lat_ns(lat) + ! This could be written simply as merge('N','S',lat>=0) if F95 allowed + implicit none + real :: lat + if(lat>=0) then + get_lat_ns='N' + else + get_lat_ns='S' + endif + end function get_lat_ns + character(1) function get_lon_ew(lon) + ! This could be written simply as merge('E','W',lon>=0) if F95 allowed + implicit none + real :: lon + if(lon>=0) then + get_lon_ew='E' + else + get_lon_ew='W' + endif + end function get_lon_ew + + subroutine fv_tracker_post_move(Atm) + ! This updates the tracker i/j fix location and square of the + ! distance to the tracker center after a nest move. + type(fv_atmos_type), intent(inout) :: Atm + integer :: ierr, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe + + ! Get the grid bounds: + CALL get_ijk_from_domain(Atm, & + ids, ide, jds, jde, kds, kde, & + ims, ime, jms, jme, kms, kme, & + ips, ipe, jps, jpe, kps, kpe ) + + ! Get the i/j center location from the fix location: + ierr=0 + call get_nearest_lonlat(Atm,Tracker(n)%tracker_ifix,Tracker(n)%tracker_jfix, & + ierr,Tracker(n)%tracker_fixlon,Tracker(n)%tracker_fixlat, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + + ! Get the square of the approximate distance to the tracker center + ! at all points: + if(ierr==0) & + call get_tracker_distsq(Atm, & + ids,ide,jds,jde,kds,kde, & + ims,ime,jms,jme,kms,kme, & + ips,ipe,jps,jpe,kps,kpe) + end subroutine fv_tracker_post_move + +#ifdef DEBUG + subroutine check_validity(cparm, v, i, j) + ! 
[KA] Checks value of a tracking parameter for validity + character*(*), intent(in) :: cparm + real, intent(in) :: v + integer, intent(in) :: i, j + real :: min_v, max_v + integer :: this_pe + + min_v = -9e9 + max_v = 9e9 + this_pe = mpp_pe() + + !< set validity range + select case (trim(cparm)) + case ("zeta") + !< low-level vorticity + min_v = -1e-2 + max_v = 1e-2 + case ("hgt") + !< low-level geopotential height + min_v = 1e2 + max_v = 1e4 + case ("slp") + !< sea-level pressure + min_v = 0.85e5 + max_v = 1.10e5 + case ("wind") + !< low-level wind + min_v = 1e-3 + max_v = 2e2 + case default + !< Unrecognized parameter; must be invalid + write(0,"(A,A8)") "[KA] inval track variable:",trim(cparm) + return + end select + + !< check value for validity + if (v < min_v .OR. v > max_v) then + !< report bad value, its name, its indices, the containing pe + write(0,"(A,A8,A,E8.1,A,I3,A,2I3)") & + "[KA] inval track val:",trim(cparm)," val:",v," pe:",this_pe," i,j:",i,j + endif + + end subroutine check_validity + +#endif !< DEBUG + +end module fv_tracker_mod
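The fill_nest_from_buffer_cell_center_* routines in the fv_moving_nest_utils_mod hunk above all reduce to the same four-corner weighted average: ind(i,j,1:2) holds the coarse-grid index of the donor cell surrounding fine point (i,j), and wt(i,j,1:4) holds pre-computed weights for that cell's four corners. The standalone sketch below illustrates only that stencil; the program name, the tiny array sizes, and the equal weights are illustrative assumptions, not part of the module.

program four_corner_fill_sketch
  ! Minimal sketch of the four-corner weighted halo fill; names/sizes/weights are illustrative.
  implicit none
  integer, parameter :: nif = 4, njf = 4        ! fine (nest) points to fill
  real    :: buffer(2,2)                        ! coarse-grid donor data
  real    :: x(nif,njf)                         ! fine-grid halo values
  real    :: wt(nif,njf,4)                      ! pre-computed corner weights (last dim is always 4)
  integer :: ind(nif,njf,2)                     ! coarse index of the lower-left donor corner
  integer :: i, j, ic, jc

  buffer = reshape([10., 20., 30., 40.], [2,2]) ! arbitrary coarse values
  ind    = 1                                    ! every fine point maps to coarse cell (1,1) here
  wt     = 0.25                                 ! equal weights; the real code uses geometric weights

  do j = 1, njf
    do i = 1, nif
      ic = ind(i,j,1)
      jc = ind(i,j,2)
      ! Same stencil as fill_nest_from_buffer_cell_center_r4_2d:
      x(i,j) = wt(i,j,1)*buffer(ic  ,jc  ) + wt(i,j,2)*buffer(ic  ,jc+1) &
             + wt(i,j,3)*buffer(ic+1,jc+1) + wt(i,j,4)*buffer(ic+1,jc  )
    enddo
  enddo

  print '(A,F8.3)', 'filled value at (1,1): ', x(1,1)   ! 25.000 with equal weights
end program four_corner_fill_sketch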
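Similarly, calcdist in fv_tracker_mod measures the separation between candidate storm centers with the spherical law of cosines applied to colatitudes, and returns kilometers because ecircum is specified in km. A minimal sketch of that formula follows; it assumes northern-hemisphere points (the pole=90 branch of calcdist), and the coordinates are illustrative.

program calcdist_sketch
  ! Sketch of the great-circle distance used by calcdist (spherical law of cosines, result in km).
  implicit none
  real, parameter :: ecircum = 40030.2          ! Earth's circumference in km, as in fv_tracker_mod
  real, parameter :: dtr = 0.0174532925199433   ! degrees -> radians
  real :: lonb, latb, lonc, latc, cosanga, degrees, xdist

  lonb = -80.0; latb = 25.0                     ! illustrative points (degrees)
  lonc = -79.0; latc = 26.0

  ! Colatitudes from the North Pole and the longitude difference, then the law of cosines:
  cosanga = cos((90.-latb)*dtr)*cos((90.-latc)*dtr) + &
            sin((90.-latb)*dtr)*sin((90.-latc)*dtr)*cos(abs(lonb-lonc)*dtr)
  cosanga = min(cosanga, 1.0)                   ! guard against rounding past 1.0, as calcdist does

  degrees = acos(cosanga)/dtr                   ! central angle in degrees
  xdist   = degrees/360. * ecircum              ! fraction of a great circle, in km

  print '(A,F8.2,A)', 'separation: ', xdist, ' km'
end program calcdist_sketch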