From 99dd275177db304b0bdaa91837fc209f9c8a32cf Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Fri, 15 Nov 2024 21:21:59 +0000 Subject: [PATCH 01/19] Update CAPE config and unit conversion --- ...EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf | 6 +- .../GenEnsProd_fcstREFS_obsPREPBUFR_SFC.conf | 4 +- ...ointStat_fcstREFSmean_obsPREPBUFR_SFC.conf | 7 +- ...t_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf | 7 +- .../cam/exevs_refs_grid2obs_cape_plots.sh | 1 + .../cam/exevs_refs_grid2obs_ctc_plots.sh | 4 +- .../cam/exevs_refs_grid2obs_ecnt_plots.sh | 4 +- scripts/plots/cam/exevs_refs_precip_plots.sh | 4 +- .../cam/exevs_refs_precip_spatial_plots.sh | 5 +- scripts/plots/cam/exevs_refs_profile_plots.sh | 5 +- .../plots/cam/exevs_refs_snowfall_plots.sh | 5 +- .../plots/cam/exevs_refs_spcoutlook_plots.sh | 33 +- ush/cam/ush_refs_plot_py/df_preprocessing.py | 10 +- ush/cam/ush_refs_plot_py/lead_average.py | 119 ++++-- .../ush_refs_plot_py/lead_average_valid.py | 110 +++-- .../ush_refs_plot_py/performance_diagram.py | 81 ++-- ush/cam/ush_refs_plot_py/plot_util.py | 379 ++++++++++++------ ush/cam/ush_refs_plot_py/prune_stat_files.py | 9 +- ush/cam/ush_refs_plot_py/refs_atmos_util.py | 9 +- ush/cam/ush_refs_plot_py/settings.py | 133 +++++- ush/cam/ush_refs_plot_py/stat_by_level.py | 70 +++- ush/cam/ush_refs_plot_py/threshold_average.py | 70 +++- ush/cam/ush_refs_plot_py/time_series.py | 78 +++- .../ush_refs_plot_py/valid_hour_average.py | 361 ++++++++++++----- 24 files changed, 1093 insertions(+), 421 deletions(-) diff --git a/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf b/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf index 60ff4a6485..ba2329113c 100755 --- a/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf +++ b/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf @@ -66,7 +66,7 @@ FCST_VAR7_LEVELS = L0 FCST_VAR7_OPTIONS = GRIB_lvl_typ = 1 
OBS_VAR7_NAME = {FCST_VAR7_NAME} OBS_VAR7_LEVELS = {FCST_VAR7_LEVELS} -OBS_VAR7_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = UNION +OBS_VAR7_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION FCST_VAR8_NAME = RH FCST_VAR8_LEVELS = Z2 @@ -75,10 +75,10 @@ OBS_VAR8_LEVELS = {FCST_VAR8_LEVELS} FCST_VAR9_NAME = CAPE FCST_VAR9_LEVELS = P90-0 -FCST_VAR9_OPTIONS = cnt_thresh = [ >0 ] +FCST_VAR9_OPTIONS = cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; OBS_VAR9_NAME = MLCAPE OBS_VAR9_LEVELS = L0-100000 -OBS_VAR9_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = UNION +OBS_VAR9_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION; FCST_VAR10_NAME = GUST FCST_VAR10_LEVELS = L0 diff --git a/parm/metplus_config/stats/cam/grid2obs/GenEnsProd_fcstREFS_obsPREPBUFR_SFC.conf b/parm/metplus_config/stats/cam/grid2obs/GenEnsProd_fcstREFS_obsPREPBUFR_SFC.conf index 04404ce42f..1d8941bb0d 100644 --- a/parm/metplus_config/stats/cam/grid2obs/GenEnsProd_fcstREFS_obsPREPBUFR_SFC.conf +++ b/parm/metplus_config/stats/cam/grid2obs/GenEnsProd_fcstREFS_obsPREPBUFR_SFC.conf @@ -46,7 +46,7 @@ ENS_VAR6_THRESH = lt152, lt305, lt914, lt1524, lt3048, ge914 ENS_VAR7_NAME = CAPE ENS_VAR7_LEVELS = L0 ENS_VAR7_THRESH = ge250, ge500, ge1000, ge2000 -ENS_VAR7_OPTIONS = GRIB_lvl_typ = 1 +ENS_VAR7_OPTIONS = GRIB_lvl_typ = 1; cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; ENS_VAR8_NAME = RH ENS_VAR8_LEVELS = Z2 @@ -54,7 +54,7 @@ ENS_VAR8_THRESH = le15, le20, le25, le30 ENS_VAR9_NAME = CAPE ENS_VAR9_LEVELS = P90-0 -ENS_VAR9_OPTIONS = cnt_thresh = [ >0 ] +ENS_VAR9_OPTIONS = cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; ENS_VAR9_THRESH = ge250, ge500, ge1000, ge2000 ENS_VAR10_NAME = GUST diff --git a/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SFC.conf b/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SFC.conf index bb57b19381..6ef019e1ab 100755 --- a/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SFC.conf +++ 
b/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SFC.conf @@ -45,7 +45,8 @@ FCST_VAR4_OPTIONS = GRIB_lvl_typ = 215 BOTH_VAR5_NAME = CAPE BOTH_VAR5_LEVELS = L0 BOTH_VAR5_THRESH = >=250, >=500, >=1000, >=2000 -FCST_VAR5_OPTIONS = GRIB_lvl_typ = 1 +FCST_VAR5_OPTIONS = GRIB_lvl_typ = 1; cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; +OBS_VAR5_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION; BOTH_VAR6_NAME = VIS BOTH_VAR6_LEVELS = L0 @@ -53,10 +54,10 @@ BOTH_VAR6_THRESH = <805, <1609, <4828, <8045, <16090, >=8045 FCST_VAR7_NAME = CAPE FCST_VAR7_LEVELS = P90-0 -FCST_VAR7_OPTIONS = cnt_thresh = [ >0 ] +FCST_VAR7_OPTIONS = cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; OBS_VAR7_NAME = MLCAPE OBS_VAR7_LEVELS = L0-100000 -OBS_VAR7_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = UNION +OBS_VAR7_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION; BOTH_VAR7_THRESH = >=250, >=500, >=1000, >=2000 BOTH_VAR8_NAME = TCDC diff --git a/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf b/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf index f086bdb355..78c19e3421 100755 --- a/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf +++ b/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf @@ -30,14 +30,15 @@ PROCESS_LIST = PointStat BOTH_VAR1_NAME = CAPE BOTH_VAR1_LEVELS = L0 BOTH_VAR1_THRESH = >=250, >=500, >=1000, >=2000 -FCST_VAR1_OPTIONS = GRIB_lvl_typ = 1 +FCST_VAR1_OPTIONS = GRIB_lvl_typ = 1; cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; +OBS_VAR1_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION; FCST_VAR2_NAME = CAPE FCST_VAR2_LEVELS = P0-90 -FCST_VAR2_OPTIONS = cnt_thresh = [ >0 ] +FCST_VAR2_OPTIONS = cnt_thresh = [ NA ] ; cnt_logic = INTERSECTION; OBS_VAR2_NAME = MLCAPE OBS_VAR2_LEVELS = L0-90000 -OBS_VAR2_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = UNION +OBS_VAR2_OPTIONS = cnt_thresh 
= [ >0 ]; cnt_logic = INTERSECTION; BOTH_VAR2_THRESH = >=250, >=500, >=1000, >=2000 diff --git a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh index f84f49e011..e2c050210e 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh @@ -17,6 +17,7 @@ mkdir -p $save_dir mkdir -p $output_base_dir mkdir -p $DATA/logs + restart=$COMOUT/restart/$last_days/refs_cape_plots if [ ! -d $restart ] ; then mkdir -p $restart diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh index bcdf82529c..016a932845 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh @@ -394,7 +394,9 @@ for valid in 00z 03z 06z 09z 12z 15z 18z 21z ; do done #score_type done -tar -cvf evs.plots.refs.grid2obs.ctc.last${last_days}days.v${VDATE}.tar *.png +if [ -s *.png ] ; then + tar -cvf evs.plots.refs.grid2obs.ctc.last${last_days}days.v${VDATE}.tar *.png +fi # Cat the plotting log files log_dir="$DATA/logs" diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh index 1b3a062efe..814b5eaa77 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh @@ -284,7 +284,9 @@ done #stats done #valid -tar -cvf evs.plots.refs.grid2obs.ecnt.last${last_days}days.v${VDATE}.tar *.png +if [ -s *.png ] ; then + tar -cvf evs.plots.refs.grid2obs.ecnt.last${last_days}days.v${VDATE}.tar *.png +fi # Cat the plotting log files log_dir="$DATA/logs" diff --git a/scripts/plots/cam/exevs_refs_precip_plots.sh b/scripts/plots/cam/exevs_refs_precip_plots.sh index dd7e8df763..1033c124fd 100755 --- a/scripts/plots/cam/exevs_refs_precip_plots.sh +++ b/scripts/plots/cam/exevs_refs_precip_plots.sh @@ -335,7 +335,9 @@ for var in apcp_01 apcp_03 apcp_24 ; do done -tar 
-cvf evs.plots.refs.precip.last${last_days}days.v${VDATE}.tar *.png +if [ -s *.png ] ; then + tar -cvf evs.plots.refs.precip.last${last_days}days.v${VDATE}.tar *.png +fi # Cat the plotting log files log_dir="$DATA/logs" diff --git a/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh b/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh index 814612eb3e..9a38870d1f 100755 --- a/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh +++ b/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh @@ -77,8 +77,9 @@ export err=$?; err_chk cd $DATA/grid2grid_plots/plot_output/atmos.${VDATE}/precip/SL1L2_FBAR_24hrAccumMaps_CONUS_precip_spatial_map/images -tar -cvf evs.plots.refs.precip.spatial.map.v${VDATE}.tar *.gif - +if [ -s *.gif ] ; then + tar -cvf evs.plots.refs.precip.spatial.map.v${VDATE}.tar *.gif +fi # Cat the plotting log files log_dirs="$DATA/*/*/*/logs" diff --git a/scripts/plots/cam/exevs_refs_profile_plots.sh b/scripts/plots/cam/exevs_refs_profile_plots.sh index 65fa9286c5..23457ccf80 100755 --- a/scripts/plots/cam/exevs_refs_profile_plots.sh +++ b/scripts/plots/cam/exevs_refs_profile_plots.sh @@ -354,8 +354,9 @@ for valid in 00z 12z ; do done #stats done #vlaid - -tar -cvf evs.plots.refs.profile.last${last_days}days.v${VDATE}.tar *.png +if [ -s *.png ] ; then + tar -cvf evs.plots.refs.profile.last${last_days}days.v${VDATE}.tar *.png +fi # Cat the plotting log files log_dir="$DATA/logs" diff --git a/scripts/plots/cam/exevs_refs_snowfall_plots.sh b/scripts/plots/cam/exevs_refs_snowfall_plots.sh index 83f2db4723..880be95e8e 100755 --- a/scripts/plots/cam/exevs_refs_snowfall_plots.sh +++ b/scripts/plots/cam/exevs_refs_snowfall_plots.sh @@ -281,8 +281,9 @@ for var in weasd ; do done done - -tar -cvf evs.plots.refs.snowfall.last${last_days}days.v${VDATE}.tar *.png +if [ -s *.png ] ; then + tar -cvf evs.plots.refs.snowfall.last${last_days}days.v${VDATE}.tar *.png +fi # Cat the plotting log files log_dir="$DATA/logs" diff --git 
a/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh b/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh index 4b29e21960..e8b2fdef1a 100755 --- a/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh +++ b/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh @@ -175,19 +175,23 @@ for stats in csi_fbias ratio_pod_csi ; do echo "${DATA}/run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh #Save for restart - echo "if [ -s ${plot_dir}/${score_type}_regional_*_${valid_rst}_${var_rst}*.png ] ; then" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh - echo " cp -v ${plot_dir}/${score_type}_regional_*_${valid_rst}_${var_rst}*.png $restart" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh - echo " >$restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.completed" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh - echo "fi" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh + echo "for valid_rst in 00z 12z 00z_12z ; do" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh + echo " if [ -s ${plot_dir}/${score_type}_regional_*_\${valid_rst}*.png ] ; then" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh + echo " cp -v ${plot_dir}/${score_type}_regional_*_\${valid_rst}*.png $restart" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh + echo " >$restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.completed" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh + echo " fi" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh + echo "done" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh chmod +x 
run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh echo "${DATA}/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.sh" >> run_all_poe.sh else #Restart from png files of previous runs - if [ -s $restart/${score_type}_regional_*_${valid_rst}_${var_rst}*.png ] ; then - cp $restart/${score_type}_regional_*_${valid_rst}_${var_rst}*.png ${plot_dir}/. - fi + for valid_rst in 00z 12z 00z_12z ; do + if [ -s $restart/${score_type}_regional_*_${valid_rst}*.png ] ; then + cp $restart/${score_type}_regional_*_${valid_rst}*.png ${plot_dir}/. + fi + done fi done #end of line_type @@ -231,12 +235,16 @@ for domain in day1_mrgl day1_slgt day1_tstm day1_enh day1_mdt day1_high day2_mrg level=ml valid=valid_00z_12z fi - if ls lead_average_regional_${domain}_valid_all_times_${var}*.png 1> /dev/null 2>&1; then - mv lead_average_regional_${domain}_valid_all_times_${var}*.png evs.refs.csi_fbias.${var_new}_${level}.last${last_days}days.fhrmean_${valid}.${domain}.png - fi + for all_times in 00z 12z 00z_12z ; do + if ls lead_average_regional_${domain}_valid_${all_times}_${var}*.png 1> /dev/null 2>&1; then + mv lead_average_regional_${domain}_valid_${all_times}_${var}*.png evs.refs.csi_fbias.${var_new}_${level}.last${last_days}days.fhrmean_${valid}.${domain}.png + fi + done + if ls threshold_average_regional_${domain}_valid_*_${var}_csi*.png 1> /dev/null 2>&1; then mv threshold_average_regional_${domain}_valid_*_${var}_csi*.png evs.refs.csi.${var_new}_${level}.last${last_days}days.threshmean_${valid}.${domain}.png fi + if ls threshold_average_regional_${domain}_valid_*_${var}_fbias*.png 1> /dev/null 2>&1; then mv threshold_average_regional_${domain}_valid_*_${var}_fbias*.png evs.refs.fbias.${var_new}_${level}.last${last_days}days.threshmean_${valid}.${domain}.png fi @@ -247,8 +255,9 @@ for domain in day1_mrgl day1_slgt day1_tstm day1_enh day1_mdt day1_high day2_mrg done done - -tar -cvf 
evs.plots.refs.spcoutlook.last${last_days}days.v${VDATE}.tar *.png +if [ -s *.png ] ; then + tar -cvf evs.plots.refs.spcoutlook.last${last_days}days.v${VDATE}.tar *.png +fi # Cat the plotting log files log_dir="$DATA/logs" diff --git a/ush/cam/ush_refs_plot_py/df_preprocessing.py b/ush/cam/ush_refs_plot_py/df_preprocessing.py index ba6533011d..e3f95a3a09 100755 --- a/ush/cam/ush_refs_plot_py/df_preprocessing.py +++ b/ush/cam/ush_refs_plot_py/df_preprocessing.py @@ -169,11 +169,13 @@ def create_df(logger, stats_dir, pruned_data_dir, line_type, date_range, df.reset_index(drop=True, inplace=True) return df except UnboundLocalError as e: - logger.warning(e) - logger.warning( - "Nonexistent dataframe. Stats directory may be empty. Check the logfile for more details." + logger.error(e) + logger.error( + "FATAL ERROR: Nonexistent dataframe. Check for earlier warning " + + "or error messages." ) - return None + logger.error("Quitting ...") + sys.exit(1) def filter_by_level_type(df, logger, verif_type): if df is None: diff --git a/ush/cam/ush_refs_plot_py/lead_average.py b/ush/cam/ush_refs_plot_py/lead_average.py index 7946907dd4..dba361632f 100755 --- a/ush/cam/ush_refs_plot_py/lead_average.py +++ b/ush/cam/ush_refs_plot_py/lead_average.py @@ -188,12 +188,12 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, + f" obs thresholds.") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if not symbol_found: e = "FATAL ERROR: None of the requested obs thresholds contain a valid symbol." 
logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) df_obs_thresh_symbol, df_obs_thresh_letter = list( zip(*[ plot_util.format_thresh(t) @@ -243,12 +243,12 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, + f" fcst thresholds.") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if not symbol_found: e = "FATAL ERROR: None of the requested fcst thresholds contain a valid symbol." logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) df_fcst_thresh_symbol, df_fcst_thresh_letter = list( zip(*[plot_util.format_thresh(t) for t in df['FCST_THRESH']]) ) @@ -356,13 +356,57 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, plt.close(num) logger.info("========================================") return None - + + units = df['FCST_UNITS'].tolist()[0] + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'PROB_MXUPHL25_A24_GEHWT': + units = 'decimal' + metrics_using_var_units = [ + 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR','SPREAD', + 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' + ] + coef, const = (None, None) + unit_convert = False + if units in reference.unit_conversions: + unit_convert = True + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m', 'gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif str(var_long_name_key).upper() == 'TMP' and level[0] == 'P': + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 
'm_snow' + if unit_convert: + if metric2_name is not None: + if (str(metric1_name).upper() in metrics_using_var_units + and str(metric2_name).upper() in metrics_using_var_units): + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) + elif str(metric1_name).upper() in metrics_using_var_units: + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) # Calculate desired metric metric_long_names = [] for stat in [metric1_name, metric2_name]: if stat: stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(stat).lower() + logger, df_aggregated, str(stat).lower(), [coef, const] ) df_aggregated[str(stat).upper()] = stat_output[0] metric_long_names.append(stat_output[2]) @@ -370,7 +414,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(stat).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [coef, const] ) ) if any(ci_output['STATUS'] == 1): @@ -387,6 +431,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, ci_output = ( ci_output .reindex(df_aggregated.index) + #.reindex(ci_output.index) ) df_aggregated[str(stat).upper()+'_BLERR'] = ci_output[ 'CI_LOWER' @@ -407,18 +452,20 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, ] pivot_metric1 = pd.pivot_table( df_aggregated, values=str(metric1_name).upper(), columns='MODEL', - index='LEAD_HOURS' + index='LEAD_HOURS', dropna=False ) if sample_equalization: pivot_counts = pd.pivot_table( df_aggregated, values='COUNTS', columns='MODEL', index='LEAD_HOURS' ) + #pivot_metric1 = pivot_metric1.dropna() if metric2_name is not None: pivot_metric2 = pd.pivot_table( df_aggregated, values=str(metric2_name).upper(), columns='MODEL', - index='LEAD_HOURS' + index='LEAD_HOURS', dropna=False ) + #pivot_metric2 = pivot_metric2.dropna() if confidence_intervals: pivot_ci_lower1 = 
pd.pivot_table( df_aggregated, values=str(metric1_name).upper()+'_BLERR', @@ -911,27 +958,28 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, var_long_name_key = 'HPBL' var_long_name = variable_translator[var_long_name_key] units = df['FCST_UNITS'].tolist()[0] - if units in reference.unit_conversions: + if unit_convert: if fcst_thresh and '' not in fcst_thresh: fcst_thresh_labels = [float(tlab) for tlab in fcst_thresh_labels] fcst_thresh_labels = ( - reference.unit_conversions[units]['formula'](fcst_thresh_labels) + reference.unit_conversions[units]['formula']( + fcst_thresh_labels, + rounding=True + ) ) fcst_thresh_labels = [str(tlab) for tlab in fcst_thresh_labels] if obs_thresh and '' not in obs_thresh: obs_thresh_labels = [float(tlab) for tlab in obs_thresh_labels] obs_thresh_labels = ( - reference.unit_conversions[units]['formula'](obs_thresh_labels) + reference.unit_conversions[units]['formula']( + obs_thresh_labels, + rounding=True + ) ) obs_thresh_labels = [str(tlab) for tlab in obs_thresh_labels] units = reference.unit_conversions[units]['convert_to'] if units == '-': units = '' - metrics_using_var_units = [ - 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', - 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' - ] if metric2_name is not None: metric1_string, metric2_string = metric_long_names if (str(metric1_name).upper() in metrics_using_var_units @@ -973,8 +1021,9 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, bbox_to_anchor=(0.5, -0.08), ncol=4, frameon=True, numpoints=2, borderpad=.8, labelspacing=2., columnspacing=3., handlelength=3., handletextpad=.4, borderaxespad=.5) + #fig.subplots_adjust(bottom=.2, wspace=0, hspace=0) fig.subplots_adjust(bottom=.15, wspace=0, hspace=0) - fig.subplots_adjust(top=0.85) + fig.subplots_adjust(top=0.85) ax.grid( visible=True, which='major', axis='both', alpha=.5, linestyle='--', linewidth=.5, zorder=0 @@ 
-996,6 +1045,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, xytext=(-50, 21), textcoords='offset points', va='top', fontsize=11, color='dimgrey', ha='center' ) + #fig.subplots_adjust(top=.9) fig.subplots_adjust(top=.85) # Title @@ -1076,6 +1126,10 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, else: level_string = f'{level}' level_savename = f'{level}_' + if var_savename == 'ICEC_Z0_mean': + level_string = '' + if var_savename == 'TMP_Z0_mean': + level_string = 'Sea Surface ' if metric2_name is not None: title1 = f'{metric1_string} and {metric2_string}' else: @@ -1084,6 +1138,9 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, title1+=f' {interp_pts_string}' fcst_thresh_on = (fcst_thresh and '' not in fcst_thresh) obs_thresh_on = (obs_thresh and '' not in obs_thresh) + if metric1_string == 'Brier Score': + fcst_thresh_on = False + obs_thresh_on = False if fcst_thresh_on: fcst_thresholds_phrase = ', '.join([ f'{opt}{fcst_thresh_label}' @@ -1125,10 +1182,11 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, + f'{date_start_string} to {date_end_string}') title_center = '\n'.join([title1, title2, title3]) if sample_equalization: + #title_pad=40 title_pad=30 else: title_pad=None - ax.set_title(title_center, loc=plotter.title_loc, pad=title_pad) + ax.set_title(title_center, loc=plotter.title_loc, pad=title_pad) logger.info("... 
Plotting complete.") # Logos @@ -1166,9 +1224,14 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, # Saving models_savename = '_'.join([str(model) for model in model_list]) if len(date_hours) <= 8: - date_hours_savename='all_times' + date_hours_savename = '_'.join([ + f'{date_hour:02d}Z' for date_hour in date_hours + ]) else: - date_hours_savename='all_times' + date_hours_savename = '-'.join([ + f'{date_hour:02d}Z' + for date_hour in [date_hours[0], date_hours[-1]] + ]) date_start_savename = date_range[0].strftime('%Y%m%d') date_end_savename = date_range[1].strftime('%Y%m%d') if str(eval_period).upper() == 'TEST': @@ -1195,11 +1258,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") @@ -1366,7 +1425,7 @@ def main(): if e: logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if (str(INTERP).upper() not in case_specs['interp'].replace(' ','').split(',')): e = (f"FATAL ERROR: The requested interp method is not valid for the" @@ -1374,7 +1433,7 @@ def main(): + f" line_type ({LINE_TYPE}): {INTERP}") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) for metric in metrics: if metric is not None: if (str(metric).lower() @@ -1385,7 +1444,7 @@ def main(): + f" line_type ({LINE_TYPE}): {metric}") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) for requested_var in VARIABLES: if requested_var in list(case_specs['var_dict'].keys()): var_specs = 
case_specs['var_dict'][requested_var] @@ -1442,7 +1501,7 @@ def main(): + f" size") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if (FCST_LEVELS[l] not in var_specs['fcst_var_levels'] or OBS_LEVELS[l] not in var_specs['obs_var_levels']): e = (f"The requested variable/level combination is not valid: " diff --git a/ush/cam/ush_refs_plot_py/lead_average_valid.py b/ush/cam/ush_refs_plot_py/lead_average_valid.py index 29543018ce..c79e690a27 100755 --- a/ush/cam/ush_refs_plot_py/lead_average_valid.py +++ b/ush/cam/ush_refs_plot_py/lead_average_valid.py @@ -188,12 +188,12 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, + f" obs thresholds.") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if not symbol_found: e = "FATAL ERROR: None of the requested obs thresholds contain a valid symbol." logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) df_obs_thresh_symbol, df_obs_thresh_letter = list( zip(*[ plot_util.format_thresh(t) @@ -243,12 +243,12 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, + f" fcst thresholds.") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if not symbol_found: e = "FATAL ERROR: None of the requested fcst thresholds contain a valid symbol." 
logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) df_fcst_thresh_symbol, df_fcst_thresh_letter = list( zip(*[plot_util.format_thresh(t) for t in df['FCST_THRESH']]) ) @@ -356,13 +356,57 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, plt.close(num) logger.info("========================================") return None - + + units = df['FCST_UNITS'].tolist()[0] + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'PROB_MXUPHL25_A24_GEHWT': + units = 'decimal' + metrics_using_var_units = [ + 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR','SPREAD', + 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' + ] + coef, const = (None, None) + unit_convert = False + if units in reference.unit_conversions: + unit_convert = True + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m', 'gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif str(var_long_name_key).upper() == 'TMP' and level[0] == 'P': + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 'm_snow' + if unit_convert: + if metric2_name is not None: + if (str(metric1_name).upper() in metrics_using_var_units + and str(metric2_name).upper() in metrics_using_var_units): + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) + elif str(metric1_name).upper() in metrics_using_var_units: + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) # Calculate desired metric metric_long_names = [] for stat in 
[metric1_name, metric2_name]: if stat: stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(stat).lower() + logger, df_aggregated, str(stat).lower(), [coef, const] ) df_aggregated[str(stat).upper()] = stat_output[0] metric_long_names.append(stat_output[2]) @@ -370,7 +414,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(stat).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [coef, const] ) ) if any(ci_output['STATUS'] == 1): @@ -387,6 +431,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, ci_output = ( ci_output .reindex(df_aggregated.index) + #.reindex(ci_output.index) ) df_aggregated[str(stat).upper()+'_BLERR'] = ci_output[ 'CI_LOWER' @@ -407,18 +452,20 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, ] pivot_metric1 = pd.pivot_table( df_aggregated, values=str(metric1_name).upper(), columns='MODEL', - index='LEAD_HOURS' + index='LEAD_HOURS', dropna=False ) if sample_equalization: pivot_counts = pd.pivot_table( df_aggregated, values='COUNTS', columns='MODEL', index='LEAD_HOURS' ) + #pivot_metric1 = pivot_metric1.dropna() if metric2_name is not None: pivot_metric2 = pd.pivot_table( df_aggregated, values=str(metric2_name).upper(), columns='MODEL', - index='LEAD_HOURS' + index='LEAD_HOURS', dropna=False ) + #pivot_metric2 = pivot_metric2.dropna() if confidence_intervals: pivot_ci_lower1 = pd.pivot_table( df_aggregated, values=str(metric1_name).upper()+'_BLERR', @@ -915,27 +962,28 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, var_long_name_key = 'HPBL' var_long_name = variable_translator[var_long_name_key] units = df['FCST_UNITS'].tolist()[0] - if units in reference.unit_conversions: + if unit_convert: if fcst_thresh and '' not in fcst_thresh: fcst_thresh_labels = [float(tlab) for tlab in fcst_thresh_labels] fcst_thresh_labels = ( - 
reference.unit_conversions[units]['formula'](fcst_thresh_labels) + reference.unit_conversions[units]['formula']( + fcst_thresh_labels, + rounding=True + ) ) fcst_thresh_labels = [str(tlab) for tlab in fcst_thresh_labels] if obs_thresh and '' not in obs_thresh: obs_thresh_labels = [float(tlab) for tlab in obs_thresh_labels] obs_thresh_labels = ( - reference.unit_conversions[units]['formula'](obs_thresh_labels) + reference.unit_conversions[units]['formula']( + obs_thresh_labels, + rounding=True + ) ) obs_thresh_labels = [str(tlab) for tlab in obs_thresh_labels] units = reference.unit_conversions[units]['convert_to'] if units == '-': units = '' - metrics_using_var_units = [ - 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', - 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' - ] if metric2_name is not None: metric1_string, metric2_string = metric_long_names if (str(metric1_name).upper() in metrics_using_var_units @@ -977,8 +1025,9 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, bbox_to_anchor=(0.5, -0.08), ncol=4, frameon=True, numpoints=2, borderpad=.8, labelspacing=2., columnspacing=3., handlelength=3., handletextpad=.4, borderaxespad=.5) + #fig.subplots_adjust(bottom=.2, wspace=0, hspace=0) fig.subplots_adjust(bottom=.15, wspace=0, hspace=0) - fig.subplots_adjust(top=0.85) + fig.subplots_adjust(top=0.85) ax.grid( visible=True, which='major', axis='both', alpha=.5, linestyle='--', linewidth=.5, zorder=0 @@ -1000,6 +1049,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, xytext=(-50, 21), textcoords='offset points', va='top', fontsize=11, color='dimgrey', ha='center' ) + #fig.subplots_adjust(top=.9) fig.subplots_adjust(top=.85) # Title @@ -1080,6 +1130,10 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, else: level_string = f'{level}' level_savename = f'{level}_' + if var_savename == 'ICEC_Z0_mean': + level_string = '' + if var_savename 
== 'TMP_Z0_mean': + level_string = 'Sea Surface ' if metric2_name is not None: title1 = f'{metric1_string} and {metric2_string}' else: @@ -1088,6 +1142,9 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, title1+=f' {interp_pts_string}' fcst_thresh_on = (fcst_thresh and '' not in fcst_thresh) obs_thresh_on = (obs_thresh and '' not in obs_thresh) + if metric1_string == 'Brier Score': + fcst_thresh_on = False + obs_thresh_on = False if fcst_thresh_on: fcst_thresholds_phrase = ', '.join([ f'{opt}{fcst_thresh_label}' @@ -1129,10 +1186,11 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, + f'{date_start_string} to {date_end_string}') title_center = '\n'.join([title1, title2, title3]) if sample_equalization: + #title_pad=40 title_pad=30 else: title_pad=None - ax.set_title(title_center, loc=plotter.title_loc, pad=title_pad) + ax.set_title(title_center, loc=plotter.title_loc, pad=title_pad) logger.info("... Plotting complete.") # Logos @@ -1204,11 +1262,7 @@ def plot_lead_average(df: pd.DataFrame, logger: logging.Logger, f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. 
Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") @@ -1375,7 +1429,7 @@ def main(): if e: logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if (str(INTERP).upper() not in case_specs['interp'].replace(' ','').split(',')): e = (f"FATAL ERROR: The requested interp method is not valid for the" @@ -1383,7 +1437,7 @@ def main(): + f" line_type ({LINE_TYPE}): {INTERP}") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) for metric in metrics: if metric is not None: if (str(metric).lower() @@ -1394,7 +1448,7 @@ def main(): + f" line_type ({LINE_TYPE}): {metric}") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) for requested_var in VARIABLES: if requested_var in list(case_specs['var_dict'].keys()): var_specs = case_specs['var_dict'][requested_var] @@ -1451,7 +1505,7 @@ def main(): + f" size") logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if (FCST_LEVELS[l] not in var_specs['fcst_var_levels'] or OBS_LEVELS[l] not in var_specs['obs_var_levels']): e = (f"The requested variable/level combination is not valid: " diff --git a/ush/cam/ush_refs_plot_py/performance_diagram.py b/ush/cam/ush_refs_plot_py/performance_diagram.py index 97a0316f0e..1beab7ee34 100755 --- a/ush/cam/ush_refs_plot_py/performance_diagram.py +++ b/ush/cam/ush_refs_plot_py/performance_diagram.py @@ -1,5 +1,6 @@ #! 
/usr/bin/env python3 + ############################################################################### # # Name: performance_diagram.py @@ -310,7 +311,7 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, metric_long_names = [] for metric_name in [metric1_name, metric2_name, metric3_name]: stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(metric_name).lower() + logger, df_aggregated, str(metric_name).lower(), [None, None] ) df_aggregated[str(metric_name).upper()] = stat_output[0] metric_long_names.append(stat_output[2]) @@ -318,7 +319,7 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(metric_name).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [None, None] ) ) if any(ci_output['STATUS'] == 1): @@ -515,6 +516,9 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, ) plt.close(num) logger.info("========================================") + print( + "Continuing due to missing data. Check the log file for details." 
+ ) return None @@ -637,28 +641,48 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, logger.info("========================================") return None units = df['FCST_UNITS'].tolist()[0] + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'PROB_MXUPHL25_A24_GEHWT': + units = 'decimal' + unit_convert = False if units in reference.unit_conversions: - thresh_labels = [float(tlab) for tlab in thresh_labels] - thresh_labels = reference.unit_conversions[units]['formula'](thresh_labels) - thresh_diff_categories = np.array([ - [np.power(10., y)] - for y in [-5,-4,-3,-2,-1,0,1,2,3,4,5] - ]).flatten() - precision_scale_indiv_mult = [ - thresh_diff_categories[item] - for item in np.digitize(thresh_labels, thresh_diff_categories) - ] - precision_scale_collective_mult = 100/min(precision_scale_indiv_mult) - precision_scale = np.multiply( - precision_scale_indiv_mult, precision_scale_collective_mult - ) - thresh_labels = [ - f'{np.round(tlab)/precision_scale[t]}' - for t, tlab in enumerate( - np.multiply(thresh_labels, precision_scale) + unit_convert = True + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m', 'gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 'm_snow' + if unit_convert: + thresh_labels = [float(tlab) for tlab in thresh_labels] + thresh_labels = reference.unit_conversions[units]['formula']( + thresh_labels, + rounding=True ) - ] - units = reference.unit_conversions[units]['convert_to'] + thresh_diff_categories = np.array([ + [np.power(10., y)] + for y in [-5,-4,-3,-2,-1,0,1,2,3,4,5] + ]).flatten() + precision_scale_indiv_mult = [ + thresh_diff_categories[item] + for item in 
np.digitize(thresh_labels, thresh_diff_categories) + ] + precision_scale_collective_mult = 100/min(precision_scale_indiv_mult) + precision_scale = np.multiply( + precision_scale_indiv_mult, precision_scale_collective_mult + ) + thresh_labels = [ + f'{np.round(tlab)/precision_scale[t]}' + for t, tlab in enumerate( + np.multiply(thresh_labels, precision_scale) + ) + ] + units = reference.unit_conversions[units]['convert_to'] if units == '-': units = '' f = lambda m,c,ls,lw,ms,mec: plt.plot( @@ -783,7 +807,7 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, var_long_name = variable_translator[var_long_name_key] metrics_using_var_units = [ 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR','SPREAD', 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' ] ax.set_ylabel(f'{metric_long_names[1]}') @@ -810,6 +834,7 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, linewidth=.5, c='black', zorder=0 ) + #fig.subplots_adjust(bottom=.2, right=.77, left=.23, wspace=0, hspace=0) fig.subplots_adjust(bottom=.15, right=.77, left=.23, wspace=0, hspace=0) fig.subplots_adjust(top=0.85) cax = fig.add_axes([.775, .2, .01, .725]) @@ -908,6 +933,8 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, else: level_string = f'{level} ' level_savename = f'{level}_' + if var_savename == 'ICEC_Z0_mean': + level_string = '' thresholds_phrase = ', '.join([ f'{opt}{thresh_label}' for thresh_label in thresh_labels ]) @@ -994,11 +1021,7 @@ def plot_performance_diagram(df: pd.DataFrame, logger: logging.Logger, f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. 
Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") @@ -1156,7 +1179,7 @@ def main(): if e: logger.error(e) logger.error("Quitting ...") - raise ValueError(e+"\nQuitting ...") + raise ValueError(e) if (str(INTERP).upper() not in case_specs['interp'].replace(' ','').split(',')): e = (f"FATAL ERROR: The requested interp method is not valid for the" diff --git a/ush/cam/ush_refs_plot_py/plot_util.py b/ush/cam/ush_refs_plot_py/plot_util.py index 09aa5d65b4..33ee928e5e 100755 --- a/ush/cam/ush_refs_plot_py/plot_util.py +++ b/ush/cam/ush_refs_plot_py/plot_util.py @@ -2,6 +2,7 @@ import pickle import os +import sys import datetime as datetime import time import numpy as np @@ -298,8 +299,8 @@ def get_stat_file_line_type_columns(logger, met_version, line_type): if met_version >= 12.0: stat_file_line_type_columns = [ 'TOTAL', 'UFBAR', 'VFBAR', 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR', 'F_SPEED_BAR', 'O_SPEED_BAR', - 'TOTAL_DIR','DIR_ME','DIR_MAE', 'DIR_MSE' + 'UVFFBAR', 'UVOOBAR', 'F_SPEED_BAR', 'O_SPEED_BAR', 'TOTAL_DIR', + 'DIR_ME', 'DIR_MAE', 'DIR_MSE' ] elif met_version >= 7.0: stat_file_line_type_columns = [ @@ -312,14 +313,64 @@ def get_stat_file_line_type_columns(logger, met_version, line_type): 'UVFFBAR', 'UVOOBAR' ] elif line_type == 'VAL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ + if met_version >= 12.0: + stat_file_line_type_columns = [ + 'TOTAL', 'UFABAR', 'VFABAR', 'UOABAR', 'VOABAR', 'UVFOABAR', + 'UVFFABAR', 'UVOOABAR', 'FA_SPEED_BAR', 'OA_SPEED_BAR', + 'TOTAL_DIR', 'DIRA_ME', 'DIRA_MAE', 'DIRA_MSE' + ] + elif met_version >= 11.0: + stat_file_line_type_columns = [ + 'TOTAL', 'UFABAR', 'VFABAR', 'UOABAR', 'VOABAR', 'UVFOABAR', + 'UVFFABAR', 'UVOOABAR', 'FA_SPEED_BAR', 'OA_SPEED_BAR' + ] + elif met_version >= 6.0: + stat_file_line_type_columns = [ 'TOTAL', 'UFABAR', 'VFABAR', 'UOABAR', 
'VOABAR', 'UVFOABAR', 'UVFFABAR', 'UVOOABAR' ] elif line_type == 'VCNT': - if met_version >= 7.0: - stat_file_line_type_columns = [ + if met_version >= 12.0: + stat_file_line_type_columns = [ + 'TOTAL', 'FBAR', 'FBAR_BCL', 'FBAR_BCU', 'OBAR', 'OBAR_BCL', + 'OBAR_BCU', 'FS_RMS', 'FS_RMS_BCL', 'FS_RMS_BCU', 'OS_RMS', + 'OS_RMS_BCL', 'OS_RMS_BCU', 'MSVE', 'MSVE_BCL', 'MSVE_BCU', + 'RMSVE', 'RMSVE_BCL', 'RMSVE_BCU', 'FSTDEV', 'FSTDEV_BCL', + 'FSTDEV_BCU', 'OSTDEV', 'OSTDEV_BCL', 'OSTDEV_BCU', 'FDIR', + 'FDIR_BCL', 'FDIR_BCU', 'ODIR', 'ODIR_BCL', 'ODIR_BCU', + 'FBAR_SPEED', 'FBAR_SPEED_BCL', 'FBAR_SPEED_BCU', 'OBAR_SPEED', + 'OBAR_SPEED_BCL', 'OBAR_SPEED_BCU', 'VDIFF_SPEED', + 'VDIFF_SPEED_BCL', 'VDIFF_SPEED_BCU', 'VDIFF_DIR', + 'VDIFF_DIR_BCL', 'VDIFF_DIR_BCU', 'SPEED_ERR', 'SPEED_ERR_BCL', + 'SPEED_ERR_BCU', 'SPEED_ABSERR', 'SPEED_ABSERR_BCL', + 'SPEED_ABSERR_BCU', 'DIR_ERR', 'DIR_ERR_BCL', 'DIR_ERR_BCU', + 'DIR_ABSERR', 'DIR_ABSERR_BCL', 'DIR_ABSERR_BCU', 'ANOM_CORR', + 'ANOM_CORR_NCL', 'ANOM_CORR_NCU', 'ANOM_CORR_BCL', 'ANOM_CORR_BCU', + 'ANOM_CORR_UNCNR', 'ANOM_CORR_UNCNTR_BCL', 'ANOM_CORR_UNCNTR_BCU', + 'TOTAL_DIR', 'DIR_ME', 'DIR_ME_BCL', 'DIR_ME_BCU', 'DIR_MAE', + 'DIR_MAE_BCL', 'DIR_MAE_BCU', 'DIR_MSE', 'DIR_MSE_BCL', + 'DIR_MSE_BCU', 'DIR_RMSE', 'DIR_RMSE_BCL', 'DIR_RMSE_BCU' + ] + elif met_version >= 11.0: + stat_file_line_type_columns = [ + 'TOTAL', 'FBAR', 'FBAR_BCL', 'FBAR_BCU', 'OBAR', 'OBAR_BCL', + 'OBAR_BCU', 'FS_RMS', 'FS_RMS_BCL', 'FS_RMS_BCU', 'OS_RMS', + 'OS_RMS_BCL', 'OS_RMS_BCU', 'MSVE', 'MSVE_BCL', 'MSVE_BCU', + 'RMSVE', 'RMSVE_BCL', 'RMSVE_BCU', 'FSTDEV', 'FSTDEV_BCL', + 'FSTDEV_BCU', 'OSTDEV', 'OSTDEV_BCL', 'OSTDEV_BCU', 'FDIR', + 'FDIR_BCL', 'FDIR_BCU', 'ODIR', 'ODIR_BCL', 'ODIR_BCU', + 'FBAR_SPEED', 'FBAR_SPEED_BCL', 'FBAR_SPEED_BCU', 'OBAR_SPEED', + 'OBAR_SPEED_BCL', 'OBAR_SPEED_BCU', 'VDIFF_SPEED', + 'VDIFF_SPEED_BCL', 'VDIFF_SPEED_BCU', 'VDIFF_DIR', + 'VDIFF_DIR_BCL', 'VDIFF_DIR_BCU', 'SPEED_ERR', 'SPEED_ERR_BCL', + 'SPEED_ERR_BCU', 
'SPEED_ABSERR', 'SPEED_ABSERR_BCL', + 'SPEED_ABSERR_BCU', 'DIR_ERR', 'DIR_ERR_BCL', 'DIR_ERR_BCU', + 'DIR_ABSERR', 'DIR_ABSERR_BCL', 'DIR_ABSERR_BCU', 'ANOM_CORR', + 'ANOM_CORR_NCL', 'ANOM_CORR_NCU', 'ANOM_CORR_BCL', 'ANOM_CORR_BCU', + 'ANOM_CORR_UNCNT', 'ANOM_CORR_UNCNTR_BCL', 'ANOM_CORR_UNCNTR_BCU' + ] + elif met_version >= 7.0: + stat_file_line_type_columns = [ 'TOTAL', 'FBAR', 'FBAR_NCL', 'FBAR_NCU', 'OBAR', 'OBAR_NCL', 'OBAR_NCU', 'FS_RMS', 'FS_RMS_NCL', 'FS_RMS_NCU', 'OS_RMS', 'OS_RMS_NCL', 'OS_RMS_NCU', 'MSVE', 'MSVE_NCL', 'MSVE_NCU', @@ -334,14 +385,18 @@ def get_stat_file_line_type_columns(logger, met_version, line_type): 'SPEED_ABSERR_NCU', 'DIR_ERR', 'DIR_ERR_NCL', 'DIR_ERR_NCU', 'DIR_ABSERR', 'DIR_ABSERR_NCL', 'DIR_ABSERR_NCU' ] - else: - logger.error("FATAL ERROR: VCNT is not a valid LINE_TYPE in METV"+met_version) - exit(1) + else: + logger.error("FATAL ERROR: VCNT is not a valid LINE_TYPE in METV"+met_version) + exit(1) elif line_type == 'CTC': - if met_version >= 6.0: - stat_file_line_type_columns = [ + if met_version >= 11.0: + stat_file_line_type_columns = [ 'TOTAL', 'FY_OY', 'FY_ON', 'FN_OY', 'FN_ON', 'EC_VALUE' ] + elif met_version >= 6.0: + stat_file_line_type_columns = [ + 'TOTAL', 'FY_OY', 'FY_ON', 'FN_OY', 'FN_ON' + ] elif line_type == 'NBRCNT': if met_version >= 6.0: stat_file_line_type_columns = [ @@ -459,7 +514,7 @@ def calculate_average(logger, average_method, stat, model_dataframe, else: logger.error("FATAL ERROR: Invalid entry for MEAN_METHOD, " +"use MEAN, MEDIAN, or AGGREGATION") - exit(1) + sys.exit(1) return average_array def calculate_ci(logger, ci_method, modelB_values, modelA_values, total_days, @@ -568,7 +623,7 @@ def calculate_ci(logger, ci_method, modelB_values, modelA_values, total_days, else: logger.error("FATAL ERROR: Invalid entry for MAKE_CI_METHOD, " +"use EMC, EMC_MONTE_CARLO") - exit(1) + sys.exit(1) return intvl def get_stat_plot_name(logger, stat): @@ -677,9 +732,9 @@ def get_stat_plot_name(logger, stat): elif stat 
== 'hss': stat_plot_name = 'Heidke Skill Score' elif stat == 'crps': - stat_plot_name = 'CRPS' + stat_plot_name = 'Continuous Ranked Probability Score' elif stat == 'crpss': - stat_plot_name = 'CRPSS' + stat_plot_name = 'Continuous Ranked Probability Skill Score' elif stat == 'spread': stat_plot_name = 'Spread' elif stat == 'me': @@ -696,11 +751,11 @@ def get_stat_plot_name(logger, stat): stat_plot_name = 'Brier Skill Score' else: logger.error("FATAL ERROR: "+stat+" is not a valid option") - exit(1) + sys.exit(1) return stat_plot_name def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, - bs_min_samp): + bs_min_samp, conversion): """! Calculate the upper and lower bound bootstrap statistic from the data from the read in MET .stat file(s) @@ -740,6 +795,10 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, else: stat_values = model_data.loc[:]['TOTAL'] else: + if np.any(conversion): + bool_convert = True + else: + bool_convert = False if all(elem in model_data_columns for elem in ['FBAR', 'OBAR', 'MAE']): line_type = 'SL1L2' @@ -749,6 +808,28 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, fobar = model_data.loc[:]['FOBAR'] ffbar = model_data.loc[:]['FFBAR'] oobar = model_data.loc[:]['OOBAR'] + if bool_convert: + coef, const = conversion + fbar_og = fbar + obar_og = obar + fbar = coef*fbar_og+const + obar = coef*obar_og+const + fobar = ( + np.power(coef, 2)*fobar + + coef*const*fbar_og + + coef*const*obar_og + + np.power(const, 2) + ) + ffbar = ( + np.power(coef, 2)*ffbar + + 2.*coef*const*fbar_og + + np.power(const, 2) + ) + oobar = ( + np.power(coef, 2)*oobar + + 2.*coef*const*obar_og + + np.power(const, 2) + ) elif all(elem in model_data_columns for elem in ['FABAR', 'OABAR', 'MAE']): line_type = 'SAL1L2' @@ -758,6 +839,19 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, foabar = model_data.loc[:]['FOABAR'] ffabar = model_data.loc[:]['FFABAR'] 
ooabar = model_data.loc[:]['OOABAR'] + if bool_convert: + coef, const = conversion + fabar = coef*fabar + oabar = coef*oabar + foabar = ( + np.power(coef, 2)*foabar + ) + ffabar = ( + np.power(coef, 2)*ffabar + ) + ooabar = ( + np.power(coef, 2)*ooabar + ) elif all(elem in model_data_columns for elem in ['UFBAR', 'VFBAR']): line_type = 'VL1L2' @@ -769,6 +863,31 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, uvfobar = model_data.loc[:]['UVFOBAR'] uvffbar = model_data.loc[:]['UVFFBAR'] uvoobar = model_data.loc[:]['UVOOBAR'] + if bool_convert: + coef, const = conversion + ufbar_og = ufbar + vfbar_og = vfbar + uobar_og = uobar + vobar_og = vobar + ufbar = coef*ufbar_og+const + vfbar = coef*vfbar_og+const + uobar = coef*uobar_og+const + vobar = coef*vobar_og+const + uvfobar = ( + np.power(coef, 2)*uvfobar + + coef*const*(ufbar_og + uobar_og + vfbar_og + vobar_og) + + np.power(const, 2) + ) + uvffbar = ( + np.power(coef, 2)*uvffbar + + 2.*coef*const*(ufbar_og + vfbar_og) + + np.power(const, 2) + ) + uvoobar = ( + np.power(coef, 2)*uvoobar + + 2.*coef*const*(uobar_og + vobar_og) + + np.power(const, 2) + ) elif all(elem in model_data_columns for elem in ['UFABAR', 'VFABAR']): line_type = 'VAL1L2' @@ -780,6 +899,21 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, uvfoabar = model_data.loc[:]['UVFOABAR'] uvffabar = model_data.loc[:]['UVFFABAR'] uvooabar = model_data.loc[:]['UVOOABAR'] + if bool_convert: + coef, const = conversion + ufabar = coef*ufabar + vfabar = coef*vfabar + uoabar = coef*uoabar + voabar = coef*voabar + uvfoabar = ( + np.power(coef, 2)*uvfoabar + ) + uvffabar = ( + np.power(coef, 2)*uvffabar + ) + uvooabar = ( + np.power(coef, 2)*uvooabar + ) elif all(elem in model_data_columns for elem in ['VDIFF_SPEED', 'VDIFF_DIR']): line_type = 'VCNT' @@ -800,6 +934,11 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, vdiff_dir = model_data.loc[:]['VDIFF_DIR'] speed_err = 
model_data.loc[:]['SPEED_ERR'] dir_err = model_data.loc[:]['DIR_ERR'] + if bool_convert: + logger.error( + f"FATAL ERROR: Cannot convert columns for line_type \"{line_type}\"" + ) + exit(1) elif all(elem in model_data_columns for elem in ['FY_OY', 'FN_ON']): line_type = 'CTC' @@ -829,9 +968,15 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, spread = model_data.loc[:]['SPREAD'] me = model_data.loc[:]['ME'] mae = model_data.loc[:]['MAE'] + if bool_convert: + coef, const = conversion + rmse = coef*rmse + spread = coef*spread + me = coef*me + mae = np.abs(coef)*mae else: logger.error("FATAL ERROR: Could not recognize line type from columns") - exit(1) + sys.exit(1) if str(bs_method).upper() == 'MATCHED_PAIRS': if total.sum() < bs_min_samp: logger.warning(f"Sample too small for bootstrapping. (Matched pairs" @@ -930,7 +1075,7 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, oobar_est_samp = np.concatenate((oobar_est_samples)) else: logger.error("FATAL ERROR: "+line_type+" is not currently a valid option") - exit(1) + sys.exit(1) elif str(bs_method).upper() == 'FORECASTS': if total.size < bs_min_samp: logger.warning(f"Sample too small for bootstrapping. 
(Forecasts" @@ -1157,10 +1302,10 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, me_est_samp = np.concatenate((me_samples)) else: logger.error("FATAL ERROR: "+line_type+" is not currently a valid option") - exit(1) + sys.exit(1) else: logger.error("FATAL ERROR: "+bs_method+" is not a valid option") - exit(1) + sys.exit(1) if stat == 'bias': if str(bs_method).upper() in ['MATCHED_PAIRS','FORECASTS']: if line_type == 'SL1L2': @@ -1466,7 +1611,7 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, stat_values = (fy_oy_samp + fn_on_samp - C)/(total - C) else: logger.error("FATAL ERROR: "+stat+" is not a valid option") - exit(1) + sys.exit(1) stat_deltas = stat_values-stat_values_mean stat_ci_lower = np.nanpercentile(stat_deltas, lower_pctile) stat_ci_upper = np.nanpercentile(stat_deltas, upper_pctile) @@ -1474,7 +1619,7 @@ def calculate_bootstrap_ci(logger, bs_method, model_data, stat, nrepl, level, dict(CI_LOWER=[stat_ci_lower], CI_UPPER=[stat_ci_upper], STATUS=[status]) ) -def calculate_stat(logger, model_data, stat): +def calculate_stat(logger, model_data, stat, conversion): """! 
Calculate the statistic from the data from the read in MET .stat file(s) @@ -1487,7 +1632,6 @@ def calculate_stat(logger, model_data, stat): Returns: stat_values - Dataframe of the statistic values - stat_values_array - array of the statistic values stat_plot_name - string of the formal statistic name being plotted """ @@ -1503,6 +1647,10 @@ def calculate_stat(logger, model_data, stat): else: stat_values = model_data.loc[:]['TOTAL'] else: + if np.any(conversion): + bool_convert = True + else: + bool_convert = False if all(elem in model_data_columns for elem in ['FBAR', 'OBAR', 'MAE']): line_type = 'SL1L2' @@ -1512,6 +1660,28 @@ def calculate_stat(logger, model_data, stat): ffbar = model_data.loc[:]['FFBAR'] oobar = model_data.loc[:]['OOBAR'] mae = model_data.loc[:]['MAE'] + if bool_convert: + coef, const = conversion + fbar_og = fbar + obar_og = obar + fbar = coef*fbar_og+const + obar = coef*obar_og+const + fobar = ( + np.power(coef, 2)*fobar + + coef*const*fbar_og + + coef*const*obar_og + + np.power(const, 2) + ) + ffbar = ( + np.power(coef, 2)*ffbar + + 2.*coef*const*fbar_og + + np.power(const, 2) + ) + oobar = ( + np.power(coef, 2)*oobar + + 2.*coef*const*obar_og + + np.power(const, 2) + ) elif all(elem in model_data_columns for elem in ['FABAR', 'OABAR', 'MAE']): line_type = 'SAL1L2' @@ -1520,6 +1690,19 @@ def calculate_stat(logger, model_data, stat): foabar = model_data.loc[:]['FOABAR'] ffabar = model_data.loc[:]['FFABAR'] ooabar = model_data.loc[:]['OOABAR'] + if bool_convert: + coef, const = conversion + fabar = coef*fabar + oabar = coef*oabar + foabar = ( + np.power(coef, 2)*foabar + ) + ffabar = ( + np.power(coef, 2)*ffabar + ) + ooabar = ( + np.power(coef, 2)*ooabar + ) elif all(elem in model_data_columns for elem in ['UFBAR', 'VFBAR']): line_type = 'VL1L2' @@ -1530,6 +1713,31 @@ def calculate_stat(logger, model_data, stat): uvfobar = model_data.loc[:]['UVFOBAR'] uvffbar = model_data.loc[:]['UVFFBAR'] uvoobar = model_data.loc[:]['UVOOBAR'] + if 
bool_convert: + coef, const = conversion + ufbar_og = ufbar + vfbar_og = vfbar + uobar_og = uobar + vobar_og = vobar + ufbar = coef*ufbar_og+const + vfbar = coef*vfbar_og+const + uobar = coef*uobar_og+const + vobar = coef*vobar_og+const + uvfobar = ( + np.power(coef, 2)*uvfobar + + coef*const*(ufbar_og + uobar_og + vfbar_og + vobar_og) + + np.power(const, 2) + ) + uvffbar = ( + np.power(coef, 2)*uvffbar + + 2.*coef*const*(ufbar_og + vfbar_og) + + np.power(const, 2) + ) + uvoobar = ( + np.power(coef, 2)*uvoobar + + 2.*coef*const*(uobar_og + vobar_og) + + np.power(const, 2) + ) elif all(elem in model_data_columns for elem in ['UFABAR', 'VFABAR']): line_type = 'VAL1L2' @@ -1540,6 +1748,21 @@ def calculate_stat(logger, model_data, stat): uvfoabar = model_data.loc[:]['UVFOABAR'] uvffabar = model_data.loc[:]['UVFFABAR'] uvooabar = model_data.loc[:]['UVOOABAR'] + if bool_convert: + coef, const = conversion + ufabar = coef*ufabar + vfabar = coef*vfabar + uoabar = coef*uoabar + voabar = coef*voabar + uvfoabar = ( + np.power(coef, 2)*uvfoabar + ) + uvffabar = ( + np.power(coef, 2)*uvffabar + ) + uvooabar = ( + np.power(coef, 2)*uvooabar + ) elif all(elem in model_data_columns for elem in ['VDIFF_SPEED', 'VDIFF_DIR']): line_type = 'VCNT' @@ -1559,6 +1782,11 @@ def calculate_stat(logger, model_data, stat): vdiff_dir = model_data.loc[:]['VDIFF_DIR'] speed_err = model_data.loc[:]['SPEED_ERR'] dir_err = model_data.loc[:]['DIR_ERR'] + if bool_convert: + logger.error( + f"FATAL ERROR: Cannot convert column units for line_type \"{line_type}\"" + ) + exit(1) elif all(elem in model_data_columns for elem in ['FY_OY', 'FN_ON']): line_type = 'CTC' @@ -1586,7 +1814,13 @@ def calculate_stat(logger, model_data, stat): rmse = model_data.loc[:]['RMSE'] spread = model_data.loc[:]['SPREAD'] me = model_data.loc[:]['ME'] - mae = model_data.loc[:]['MAE'] + mae = model_data.loc[:]['MAE'] + if bool_convert: + coef, const = conversion + rmse = coef*rmse + spread = coef*spread + me = coef*me + mae = 
np.abs(coef)*mae elif all(elem in model_data_columns for elem in ['ROC_AUC', 'BRIER', 'BSS', 'BSS_SMPL']): line_type = 'PSTD' @@ -1597,7 +1831,7 @@ def calculate_stat(logger, model_data, stat): bss_smpl = model_data.loc[:]['BSS_SMPL'] else: logger.error("FATAL ERROR: Could not recognize line type from columns") - exit(1) + sys.exit(1) stat_plot_name = get_stat_plot_name(logger, stat) if stat == 'bias': if line_type == 'SL1L2': @@ -1833,98 +2067,9 @@ def calculate_stat(logger, model_data, stat): stat_values = (fy_oy + fn_on - C)/(total - C) else: logger.error("FATAL ERROR: "+stat+" is not a valid option") - exit(1) + sys.exit(1) nindex = stat_values.index.nlevels - if stat == 'fbar_obar' or stat == 'orate_frate' or stat == 'baser_frate': - try: - if nindex == 1: - index0 = len(stat_values_fbar.index.get_level_values(0).unique()) - stat_values_array_fbar = ( - np.ma.masked_invalid( - stat_values_fbar.values.reshape(index0) - ) - ) - index0 = len(stat_values_obar.index.get_level_values(0).unique()) - stat_values_array_obar = ( - np.ma.masked_invalid( - stat_values_obar.values.reshape(index0) - ) - ) - elif nindex == 2: - index0 = len(stat_values_fbar.index.get_level_values(0).unique()) - index1 = len(stat_values_fbar.index.get_level_values(1).unique()) - stat_values_array_fbar = ( - np.ma.masked_invalid( - stat_values_fbar.values.reshape(index0, index1) - ) - ) - index0 = len(stat_values_obar.index.get_level_values(0).unique()) - index1 = len(stat_values_obar.index.get_level_values(1).unique()) - stat_values_array_obar = ( - np.ma.masked_invalid( - stat_values_obar.values.reshape(index0, index1) - ) - ) - elif nindex == 3: - index0 = len(stat_values_fbar.index.get_level_values(0).unique()) - index1 = len(stat_values_fbar.index.get_level_values(1).unique()) - index2 = len(stat_values_fbar.index.get_level_values(2).unique()) - stat_values_array_fbar = ( - np.ma.masked_invalid( - stat_values_fbar.values.reshape(index0, index1, index2) - ) - ) - index0 = 
len(stat_values_obar.index.get_level_values(0).unique()) - index1 = len(stat_values_obar.index.get_level_values(1).unique()) - index2 = len(stat_values_obar.index.get_level_values(2).unique()) - stat_values_array_obar = ( - np.ma.masked_invalid( - stat_values_obar.values.reshape(index0, index1, index2) - ) - ) - stat_values_array = np.ma.array([stat_values_array_fbar, - stat_values_array_obar]) - except ValueError as e: - logger.warning(e) - logger.warning("This is usually OK, and will happen if " - + "event_equalization=False.") - logger.warning("Setting stat_values_array to Nonetype.") - stat_values_array = None - logger.warning("Continuing ...") - else: - try: - if nindex == 1: - index0 = len(stat_values.index.get_level_values(0).unique()) - stat_values_array = ( - np.ma.masked_invalid( - stat_values.values.reshape(1, index0) - ) - ) - elif nindex == 2: - index0 = len(stat_values.index.get_level_values(0).unique()) - index1 = len(stat_values.index.get_level_values(1).unique()) - stat_values_array = ( - np.ma.masked_invalid( - stat_values.values.reshape(1, index0, index1) - ) - ) - elif nindex == 3: - index0 = len(stat_values.index.get_level_values(0).unique()) - index1 = len(stat_values.index.get_level_values(1).unique()) - index2 = len(stat_values.index.get_level_values(2).unique()) - stat_values_array = ( - np.ma.masked_invalid( - stat_values.values.reshape(1, index0, index1, index2) - ) - ) - except ValueError as e: - logger.warning(e) - logger.warning("This is usually OK, and will happen if " - + "event_equalization=False.") - logger.warning("Setting stat_values_array to Nonetype.") - stat_values_array = None - logger.warning("Continuing ...") - return stat_values, stat_values_array, stat_plot_name + return stat_values, None, stat_plot_name def get_lead_avg_file(stat, input_filename, fcst_lead, output_base_dir): lead_avg_filename = stat + '_' + os.path.basename(input_filename) \ diff --git a/ush/cam/ush_refs_plot_py/prune_stat_files.py 
b/ush/cam/ush_refs_plot_py/prune_stat_files.py index 882ba852a8..29232ec633 100755 --- a/ush/cam/ush_refs_plot_py/prune_stat_files.py +++ b/ush/cam/ush_refs_plot_py/prune_stat_files.py @@ -100,11 +100,12 @@ def prune_data(data_dir, prune_dir, tmp_dir, output_base_template, valid_range, ) # Prune the MET .stat files and write to new file for met_stat_file in met_stat_files: - ps = subprocess.Popen('grep -R "'+model+'" '+met_stat_file+filter_cmd, - shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, encoding='UTF-8') - grep_output = ps.communicate()[0] + grep = subprocess.run('grep -R "'+model+'" '+met_stat_file+filter_cmd, + shell=True, capture_output=True, encoding="utf8") + grep_output = grep.stdout + all_grep_output = all_grep_output+grep_output + pruned_met_stat_file = os.path.join(pruned_data_dir, model+'.stat') with open(pruned_met_stat_file, 'w') as pmsf: diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_util.py b/ush/cam/ush_refs_plot_py/refs_atmos_util.py index 775351296d..0295a7bd02 100755 --- a/ush/cam/ush_refs_plot_py/refs_atmos_util.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_util.py @@ -1879,13 +1879,14 @@ def condense_model_stat_files(logger, input_dir, output_file, model, obs, ) for model_stat_file in model_stat_files: logger.debug(f"Getting data from {model_stat_file}") - ps = subprocess.Popen( + grep = subprocess.run( 'grep -R "'+model+' " '+model_stat_file+grep_opts, - shell=True, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, encoding='UTF-8' + shell=True, capture_output=True, encoding="utf8" ) logger.debug(f"Ran {ps.args}") - all_grep_output = all_grep_output+ps.communicate()[0] + + all_grep_output = all_grep_output+grep.stdout + logger.debug(f"Condensed {model} .stat file at " +f"{output_file}") with open(output_file, 'w') as f: diff --git a/ush/cam/ush_refs_plot_py/settings.py b/ush/cam/ush_refs_plot_py/settings.py index 7eebec22f9..7adb13ec51 100755 --- a/ush/cam/ush_refs_plot_py/settings.py +++ 
b/ush/cam/ush_refs_plot_py/settings.py @@ -702,6 +702,10 @@ def __init__(self): 'convert_to': 'kt', 'formula': self.formulas.mps_to_kt }, + 'Pa': { + 'convert_to': 'hPa', + 'formula': self.formulas.PA_to_hPa + }, } ''' @@ -804,6 +808,7 @@ def __init__(self): 'NHEM': 'Northern Hemisphere 20N-80N', 'SHEM': 'Southern Hemisphere 20S-80S', 'TRO': 'Tropics 20S-20N', + 'TROPICS': 'Tropics 20S-20N', 'PNA': 'Pacific North America', 'N60': '60N-90N', 'S60': '60S-90S', @@ -3584,17 +3589,121 @@ def __init__(self): }, } + ''' + Each formula in the formulas class needs to have the capability of providing three things. + 1) Simply, the complete conversion of the input quantity into the output units + 2) The rounded conversion of the input to the output (e.g., for displaying some thresholds) + 3) The coefficient and the constant that composes the formula (for converting base stats) + ''' class formulas(): - def mm_to_mm(mm_vals): - inch_vals = np.divide(mm_vals, 1.0) - return inch_vals - def K_to_F(K_vals): - F_vals = (((np.array(K_vals)-273.15)*9./5.)+32.).round() - return F_vals - def C_to_F(C_vals): - F_vals = (np.array(C_vals)*9./5.)+32. - return F_vals - def mps_to_kt(mps_vals): - kt_vals = np.array(mps_vals)*1.94384449412 - return kt_vals + def mm_to_in(mm_vals, rounding=False, return_terms=False): + if return_terms: + M = np.divide(1., 25.4) + C = 0. + return M, C + else: + if rounding: + inch_vals = np.divide(mm_vals, 25.4).round(decimals=2) + else: + inch_vals = np.divide(mm_vals, 25.4) + return inch_vals + def mm_to_mm(mm_vals, rounding=False, return_terms=False): + if return_terms: + M = 1. + C = 0. + return M, C + else: + if rounding: + inch_vals = np.divide(mm_vals, 1.).round(decimals=2) + else: + inch_vals = np.divide(mm_vals, 1.) + return inch_vals + def K_to_F(K_vals, rounding=False, return_terms=False): + if return_terms: + M = np.divide(9., 5.) + C = ((-273.15)*9./5.)+32. 
+ return M, C + else: + if rounding: + F_vals = (((np.array(K_vals)-273.15)*9./5.)+32.).round() + else: + F_vals = ((np.array(K_vals)-273.15)*9./5)+32. + return F_vals + def C_to_F(C_vals, rounding=False, return_terms=False): + if return_terms: + M = np.divide(9., 5.) + C = 32. + return M, C + else: + if rounding: + F_vals = ((np.array(C_vals)*9./5.)+32.).round() + else: + F_vals = (np.array(C_vals)*9./5.)+32. + return F_vals + def mps_to_kt(mps_vals, rounding=False, return_terms=False): + if return_terms: + M = 1.94384449412 + C = 0. + return M, C + else: + if rounding: + kt_vals = (np.multiply(mps_vals, 1.94384449412)).round() + else: + kt_vals = np.multiply(mps_vals, 1.94384449412) + return kt_vals + def PA_to_hPa(PA_vals, rounding=False, return_terms=False): + if return_terms: + M = 0.01 + C = 0. + return M, C + else: + if rounding: + hPa_vals = (np.array(PA_vals) * 0.01).round() + else: + hPa_vals = np.array(PA_vals) * 0.01 + return hPa_vals + def gpm_to_kft(gpm_vals, rounding=False, return_terms=False): + if return_terms: + M = np.divide(1., 304.8) + C = 0. + return M, C + else: + if rounding: + kft_vals = (np.divide(gpm_vals, 304.8)).round(decimals=2) + else: + kft_vals = np.divide(gpm_vals, 304.8) + return kft_vals + def m_to_mi(m_vals, rounding=False, return_terms=False): + if return_terms: + M = np.divide(1., 1609.34) + C = 0. + return M, C + else: + if rounding: + mi_vals = (np.divide(m_vals, 1609.34)).round(decimals=2) + else: + mi_vals = np.divide(m_vals, 1609.34) + return mi_vals + def m_snow_to_in(m_vals, rounding=False, return_terms=False): + if return_terms: + M = 39.3701 + C = 0. + return M, C + else: + if rounding: + in_vals = (np.multiply(m_vals, 39.37)).round(decimals=2) + else: + in_vals = np.multiply(m_vals, 39.37) + return in_vals + def dec_to_perc(dec_vals, rounding=False, return_terms=False): + if return_terms: + M = 100. + C = 0. 
+ return M, C + else: + if rounding: + perc_vals = (np.multiply(dec_vals, 100.)).round() + else: + perc_vals = np.multiply(dec_vals, 100.) + return perc_vals diff --git a/ush/cam/ush_refs_plot_py/stat_by_level.py b/ush/cam/ush_refs_plot_py/stat_by_level.py index daca78d7f7..a56aeefa1f 100755 --- a/ush/cam/ush_refs_plot_py/stat_by_level.py +++ b/ush/cam/ush_refs_plot_py/stat_by_level.py @@ -235,12 +235,53 @@ def plot_stat_by_level(df: pd.DataFrame, logger: logging.Logger, logger.info("========================================") return None + coef, const = (None, None) + units = df['FCST_UNITS'].tolist()[0] + metrics_using_var_units = [ + 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR','SPREAD', + 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' + ] + unit_convert = False + if units in reference.unit_conversions: + unit_convert = True + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m', 'gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 'm_snow' + elif str(var_long_name_key).upper() == 'TMP': + unit_convert = False + if unit_convert: + if metric2_name is not None: + if (str(metric1_name).upper() in metrics_using_var_units + and str(metric2_name).upper() in metrics_using_var_units): + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) + elif str(metric1_name).upper() in metrics_using_var_units: + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) # Calculate desired metrics metric_long_names = [] for stat in 
[metric1_name, metric2_name]: if stat: stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(stat).lower() + logger, df_aggregated, str(stat).lower(), [coef, const] ) df_aggregated[str(stat).upper()] = stat_output[0] metric_long_names.append(stat_output[2]) @@ -248,7 +289,7 @@ def plot_stat_by_level(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(stat).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [coef, const] ) ) if any(ci_output['STATUS'] == 1): @@ -704,12 +745,13 @@ def plot_stat_by_level(df: pd.DataFrame, logger: logging.Logger, np.digitize(x_range, x_range_categories[:-1]) ] xlim_min = np.floor(x_min/round_to_nearest)*round_to_nearest - xlim_max = np.ceil(x_max/round_to_nearest)*round_to_nearest + xlim_max = round(np.ceil(x_max/round_to_nearest)*round_to_nearest, len(str(round_to_nearest))-1) if len(str(xlim_min)) > 5 and np.abs(xlim_min) < 1.: xlim_min = float( np.format_float_scientific(xlim_min, unique=False, precision=3) ) - xticks = np.arange(xlim_min, xlim_max+round_to_nearest, round_to_nearest) + xticks_og = np.arange(xlim_min, xlim_max+round_to_nearest, round_to_nearest) + xticks = [round(xtick,len(str(round_to_nearest))-1) for xtick in xticks_og] if any([len(str(xtick)) > 5 and np.abs(xtick) < 1. 
for xtick in xticks]): xtick_labels = [] for xtick in xticks: @@ -735,16 +777,16 @@ def plot_stat_by_level(df: pd.DataFrame, logger: logging.Logger, elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: var_long_name_key = 'HPBL' var_long_name = variable_translator[var_long_name_key] - units = df['FCST_UNITS'].tolist()[0] if units in reference.unit_conversions: + do_unit_conversion = True + if var_long_name_key == 'TMP': + do_unit_conversion = False + else: + do_unit_conversion = False + if do_unit_conversion: units = reference.unit_conversions[units]['convert_to'] if units == '-': units = '' - metrics_using_var_units = [ - 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', - 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' - ] if metric2_name is not None: metric1_string, metric2_string = metric_long_names if (str(metric1_name).upper() in metrics_using_var_units @@ -834,7 +876,7 @@ def plot_stat_by_level(df: pd.DataFrame, logger: logging.Logger, fontsize=11, color='dimgrey', ha='right' ) fig.subplots_adjust(right=.95) - fig.subplots_adjust(top=0.85) + fig.subplots_adjust(top=.85) # Title domain = df['VX_MASK'].tolist()[0] @@ -944,11 +986,7 @@ def plot_stat_by_level(df: pd.DataFrame, logger: logging.Logger, f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. 
Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") diff --git a/ush/cam/ush_refs_plot_py/threshold_average.py b/ush/cam/ush_refs_plot_py/threshold_average.py index 5cf64af6c0..bfe5e7b4ec 100755 --- a/ush/cam/ush_refs_plot_py/threshold_average.py +++ b/ush/cam/ush_refs_plot_py/threshold_average.py @@ -192,6 +192,7 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, requested_thresh_symbol, requested_thresh_letter = list( zip(*[plot_util.format_thresh(t) for t in thresh]) ) + requested_thresh_value = [float(str(item)[2:]) for item in requested_thresh_letter] symbol_found = False for opt in ['>=', '>', '==','!=','<=', '<']: if any(opt in t for t in requested_thresh_symbol): @@ -297,9 +298,43 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, logger.info("========================================") return None + units = df['FCST_UNITS'].tolist()[0] + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'PROB_MXUPHL25_A24_GEHWT': + units = 'decimal' + metrics_using_var_units = [ + 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', + 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' + ] + coef, const = (None, None) + unit_convert = False + if units in reference.unit_conversions: + unit_convert = True + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m', 'gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 'm_snow' + 
if unit_convert: + if str(metric_name).upper() in metrics_using_var_units: + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) + # Calculate desired metric stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(metric_name).lower() + logger, df_aggregated, str(metric_name).lower(), [coef, const] ) df_aggregated[str(metric_name).upper()] = stat_output[0] metric_long_name = stat_output[2] @@ -307,7 +342,7 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(metric_name).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [coef, const] ) ) if any(ci_output['STATUS'] == 1): @@ -342,7 +377,7 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, ] pivot_metric = pd.pivot_table( - df_aggregated, values=str(metric_name).upper(), columns='MODEL', + df_aggregated, values=str(metric_name).upper(), columns='MODEL', index='FCST_THRESH_VALUE' ) if sample_equalization: @@ -372,7 +407,6 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, plt.close(num) logger.info("========================================") return None - models_renamed = [] count_renamed = 1 for requested_model in model_list: @@ -443,9 +477,15 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, pivot_counts = pivot_counts[pivot_counts.index.isin(indices_in_common)] units = df['FCST_UNITS'].tolist()[0] x_vals = pivot_metric.index.astype(float).tolist() - if units in reference.unit_conversions: - x_vals = reference.unit_conversions[units]['formula'](x_vals) - units = reference.unit_conversions[units]['convert_to'] + if unit_convert: + x_vals = reference.unit_conversions[units]['formula']( + x_vals, + rounding=True + ) + requested_thresh_value = reference.unit_conversions[units]['formula']( + requested_thresh_value, + rounding=True + ) if units == '-': units = '' 
x_vals_argsort = np.argsort(x_vals) @@ -549,7 +589,8 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, # Configure axis ticks if units in reference.unit_conversions: - x_vals_incr = reference.unit_conversions[units]['formulas'](x_vals) + x_vals_incr = reference.unit_conversions[units]['formula'](x_vals) + units = reference.unit_conversions[units]['convert_to'] xticks_min = np.min(x_vals) xticks_max = np.max(x_vals) @@ -638,17 +679,12 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: var_long_name_key = 'HPBL' var_long_name = variable_translator[var_long_name_key] - metrics_using_var_units = [ - 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', - 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' - ] if str(metric_name).upper() in metrics_using_var_units: if units: ylabel = f'{var_long_name} ({units})' else: ylabel = f'{var_long_name} (unitless)' - else: + else: ylabel = f'{metric_long_name}' ax.set_ylim(ylim_min, ylim_max) ax.set_ylabel(ylabel) @@ -859,11 +895,7 @@ def plot_threshold_average(df: pd.DataFrame, logger: logging.Logger, f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. 
Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") diff --git a/ush/cam/ush_refs_plot_py/time_series.py b/ush/cam/ush_refs_plot_py/time_series.py index 0c09a7bd4e..85fcebda5a 100755 --- a/ush/cam/ush_refs_plot_py/time_series.py +++ b/ush/cam/ush_refs_plot_py/time_series.py @@ -301,12 +301,54 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, plt.close(num) logger.info("========================================") return None + + units = df['FCST_UNITS'].tolist()[0] + metrics_using_var_units = [ + 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR','SPREAD', + 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' + ] + coef, const = (None, None) + unit_convert = False + if units in reference.unit_conversions: + unit_convert = True + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m','gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif str(var_long_name_key).upper() == 'TMP' and level[0] == 'P': + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 'm_snow' + if unit_convert: + if metric2_name is not None: + if (str(metric1_name).upper() in metrics_using_var_units + and str(metric2_name).upper() in metrics_using_var_units): + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) + elif str(metric1_name).upper() in metrics_using_var_units: + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) # Calculate 
desired metric metric_long_names = [] for stat in [metric1_name, metric2_name]: if stat: stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(stat).lower() + logger, df_aggregated, str(stat).lower(), [coef, const] ) df_aggregated[str(stat).upper()] = stat_output[0] metric_long_names.append(stat_output[2]) @@ -322,7 +364,7 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(stat).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [coef, const] ) ) if any(ci_output['STATUS'] == 1): @@ -359,7 +401,7 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, ] pivot_metric1 = pd.pivot_table( df_aggregated, values=str(metric1_name).upper(), columns='MODEL', - index=str(date_type).upper() + index=str(date_type).upper(), dropna=False ) if sample_equalization: pivot_counts = pd.pivot_table( @@ -371,7 +413,7 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, if metric2_name is not None: pivot_metric2 = pd.pivot_table( df_aggregated, values=str(metric2_name).upper(), columns='MODEL', - index=str(date_type).upper() + index=str(date_type).upper(), dropna=False ) if keep_shared_events_only: pivot_metric2 = pivot_metric2.dropna() @@ -828,20 +870,17 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: var_long_name_key = 'HPBL' var_long_name = variable_translator[var_long_name_key] - units = df['FCST_UNITS'].tolist()[0] - if units in reference.unit_conversions: + if unit_convert: if thresh and '' not in thresh: thresh_labels = [float(tlab) for tlab in thresh_labels] - thresh_labels = reference.unit_conversions[units]['formula'](thresh_labels) + thresh_labels = reference.unit_conversions[units]['formula']( + thresh_labels, + rounding=True + ) thresh_labels = [str(tlab) for tlab in thresh_labels] units = reference.unit_conversions[units]['convert_to'] 
if units == '-': units = '' - metrics_using_var_units = [ - 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', - 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' - ] if metric2_name is not None: metric1_string, metric2_string = metric_long_names if (str(metric1_name).upper() in metrics_using_var_units @@ -885,7 +924,7 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, borderpad=.8, labelspacing=2., columnspacing=3., handlelength=3., handletextpad=.4, borderaxespad=.5) fig.subplots_adjust(bottom=.15, wspace=0, hspace=0) - fig.subplots_adjust(top=.85) + fig.subplots_adjust(top=0.85) ax.grid( visible=True, which='major', axis='both', alpha=.5, linestyle='--', linewidth=.5, zorder=0 @@ -907,6 +946,7 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, xytext=(-50, 21), textcoords='offset points', va='top', fontsize=11, color='dimgrey', ha='center' ) + #fig.subplots_adjust(top=.9) fig.subplots_adjust(top=.85) # Title @@ -986,10 +1026,16 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, else: level_string = f'{level}' level_savename = f'{level}_' + if var_savename == 'ICEC_Z0_mean': + level_string = '' + if var_savename == 'TMP_Z0_mean': + level_string = 'Sea Surface ' if metric2_name is not None: title1 = f'{metric1_string} and {metric2_string}' else: title1 = f'{metric1_string}' + if metric1_string == 'Brier Score': + thresh = '' if interp_pts and '' not in interp_pts: title1+=f' {interp_pts_string}' if thresh and '' not in thresh: @@ -1090,11 +1136,7 @@ def plot_time_series(df: pd.DataFrame, logger: logging.Logger, f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. 
Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") diff --git a/ush/cam/ush_refs_plot_py/valid_hour_average.py b/ush/cam/ush_refs_plot_py/valid_hour_average.py index dfdee0f60a..6f165759ea 100755 --- a/ush/cam/ush_refs_plot_py/valid_hour_average.py +++ b/ush/cam/ush_refs_plot_py/valid_hour_average.py @@ -1,11 +1,10 @@ -#! /usr/bin/env python3 - +#!/usr/bin/env python3 ############################################################################### # # Name: valid_hour_average.py # Contact(s): Marcel Caron # Developed: Nov. 22, 2021 by Marcel Caron -# Last Modified: Dec. 01, 2022 by Marcel Caron +# Last Modified: Jul. 05, 2023 by Marcel Caron # Title: Line plot of verification metric as a function of # valid or init hour # Abstract: Plots METplus output (e.g., BCRMSE) as a line plot, @@ -28,6 +27,7 @@ import matplotlib.image as mpimg from matplotlib.offsetbox import OffsetImage, AnnotationBbox from datetime import datetime, timedelta as td +import shutil SETTINGS_DIR = os.environ['USH_DIR'] sys.path.insert(0, os.path.abspath(SETTINGS_DIR)) @@ -41,7 +41,7 @@ # ================ GLOBALS AND CONSTANTS ================ -plotter = Plotter(fig_size=(28.,14.)) +plotter = Plotter() plotter.set_up_plots() toggle = Toggle() templates = Templates() @@ -65,6 +65,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, date_type: str = 'VALID', date_hours: list = [0,6,12,18], anti_date_hours: list = [0,3,6,9,12,15,18,21], verif_type: str = 'pres', save_dir: str = '.', + restart_dir: str = '.', requested_var: str = 'HGT', line_type: str = 'SL1L2', dpi: int = 100, confidence_intervals: bool = False, interp_pts: list = [], @@ -113,23 +114,23 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, frange_phrase = 's '+', '.join([str(f) for f in flead]) else: frange_phrase = ' '+', '.join([str(f) for f in flead]) 
- frange_save_phrase = '-'.join([str(f) for f in flead]) + frange_save_phrase = '-'.join([str(f).zfill(3) for f in flead]) else: frange_phrase = f's {flead[0]}'+u'\u2013'+f'{flead[-1]}' - frange_save_phrase = f'{flead[0]}_TO_F{flead[-1]}' + frange_save_phrase = f'{flead[0]:03d}-F{flead[-1]:03d}' frange_string = f'Forecast Hour{frange_phrase}' frange_save_string = f'F{frange_save_phrase}' df = df[df['LEAD_HOURS'].isin(flead)] elif isinstance(flead, tuple): frange_string = (f'Forecast Hours {flead[0]:02d}' + u'\u2013' + f'{flead[1]:02d}') - frange_save_string = f'F{flead[0]:02d}-F{flead[1]:02d}' + frange_save_string = f'F{flead[0]:03d}-F{flead[1]:03d}' df = df[ (df['LEAD_HOURS'] >= flead[0]) & (df['LEAD_HOURS'] <= flead[1]) ] elif isinstance(flead, np.int): frange_string = f'Forecast Hour {flead:02d}' - frange_save_string = f'F{flead:02d}' + frange_save_string = f'F{flead:03d}' df = df[df['LEAD_HOURS'] == flead] else: e1 = f"FATAL ERROR: Invalid forecast lead: \'{flead}\'" @@ -156,6 +157,11 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, int(x) in df['ANTI_DATE_HOURS'].tolist() for x in anti_date_hours ]] + if df.empty: + logger.warning(f"Empty Dataframe. Continuing onto next plot...") + plt.close(num) + logger.info("========================================") + return None if interp_pts and '' not in interp_pts: interp_shape = list(df['INTERP_MTHD'])[0] if 'SQUARE' in interp_shape: @@ -169,7 +175,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, f"FATAL ERROR: Unknown INTERP_MTHD used to compute INTERP_PNTS: {interp_shape}." + f" Check the INTERP_MTHD column in your METplus stats files." + f" INTERP_MTHD must have either \"SQUARE\" or \"CIRCLE\"" - + f" in the name" + + f" in the name." 
) logger.error(error_string) raise ValueError(error_string) @@ -186,8 +192,8 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, interp_pts_string = f'(Width{interp_pts_phrase})' interp_pts_save_string = f'width{interp_pts_save_phrase}' df = df[df['INTERP_PNTS'].isin(interp_pts)] - elif isinstance(intep_pts, np.int): - interp_pts_string = f'(Wifth {widths:d})' + elif isinstance(interp_pts, np.int): + interp_pts_string = f'(Width {widths:d})' interp_pts_save_string = f'width{widths:d}' df = df[df['INTERP_PNTS'] == widths] else: @@ -306,6 +312,21 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, logger.warning(warning_string) logger.warning("Continuing ...") + if df.empty: + logger.warning(f"Empty Dataframe. Continuing onto next plot...") + plt.close(num) + logger.info("========================================") + return None + group_by = ['MODEL','ANTI_DATE_HOURS'] + if sample_equalization: + df, bool_success = plot_util.equalize_samples(logger, df, group_by) + if not bool_success: + sample_equalization = False + if df.empty: + logger.warning(f"Empty Dataframe. Continuing onto next plot...") + plt.close(num) + logger.info("========================================") + return None # Remove from model_list the models that don't exist in the dataframe cols_to_keep = [ str(model) @@ -322,20 +343,19 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, for (m, keep) in zip(model_list, cols_to_keep) if keep ] if not all(cols_to_keep): - logger.warning( - f"{models_removed_string} data were not found and will not be" - + f" plotted." - ) + if not any( + group_name in str(models_removed_string) + for group_name in ["group", "set"] + ): + logger.warning( + f"{models_removed_string} data were not found and will not be" + + f" plotted." + ) if df.empty: logger.warning(f"Empty Dataframe. 
Continuing onto next plot...") plt.close(num) logger.info("========================================") return None - group_by = ['MODEL','ANTI_DATE_HOURS'] - if sample_equalization: - df, bool_success = plot_util.equalize_samples(logger, df, group_by) - if not bool_success: - sample_equalization = False df_groups = df.groupby(group_by) # Aggregate unit statistics before calculating metrics if str(line_type).upper() == 'CTC': @@ -365,12 +385,51 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, logger.info("========================================") return None + coef, const = (None, None) + units = df['FCST_UNITS'].tolist()[0] + metrics_using_var_units = [ + 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', + 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', + 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' + ] + unit_convert = False + if units in reference.unit_conversions: + unit_convert = True + var_long_name_key = df['FCST_VAR'].tolist()[0] + if str(var_long_name_key).upper() == 'HGT': + if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: + if units in ['m', 'gpm']: + units = 'gpm' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + unit_convert = False + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HGT']: + unit_convert = False + elif any(field in str(var_long_name_key).upper() for field in ['WEASD', 'SNOD', 'ASNOW']): + if units in ['m']: + units = 'm_snow' + if unit_convert: + if metric2_name is not None: + if (str(metric1_name).upper() in metrics_using_var_units + and str(metric2_name).upper() in metrics_using_var_units): + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) + elif str(metric1_name).upper() in metrics_using_var_units: + coef, const = ( + reference.unit_conversions[units]['formula']( + None, + return_terms=True + ) + ) # Calculate desired metric metric_long_names = [] for stat in [metric1_name, metric2_name]: if stat: 
stat_output = plot_util.calculate_stat( - logger, df_aggregated, str(stat).lower() + logger, df_aggregated, str(stat).lower(), [coef, const] ) df_aggregated[str(stat).upper()] = stat_output[0] metric_long_names.append(stat_output[2]) @@ -378,7 +437,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, ci_output = df_groups.apply( lambda x: plot_util.calculate_bootstrap_ci( logger, bs_method, x, str(stat).lower(), bs_nrep, - ci_lev, bs_min_samp + ci_lev, bs_min_samp, [coef, const] ) ) if any(ci_output['STATUS'] == 1): @@ -464,7 +523,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, print_varname = df['FCST_VAR'].tolist()[0] logger.warning( f"Could not find (and cannot plot) {metric1_name}" - f" stats for {print_varname} at any level. " + + f" stats for {print_varname} at any level. " + f"This often happens when processed data are all NaNs, " + f" which are removed. Check for seasonal cases where critical " + f" threshold is not reached. Continuing ..." 
@@ -659,6 +718,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, connect_points = True else: connect_points = False + n_mods = 0 for m in range(len(mod_setting_dicts)): if model_list[m] in model_colors.model_alias: model_plot_name = ( @@ -666,6 +726,8 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, ) else: model_plot_name = model_list[m] + if str(model_list[m]) not in pivot_metric1: + continue y_vals_metric1 = pivot_metric1[str(model_list[m])].values y_vals_metric1_mean = np.nanmean(y_vals_metric1) if metric2_name is not None: @@ -701,9 +763,10 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, else: y_vals_metric_min = np.nanmin(y_vals_metric1) y_vals_metric_max = np.nanmax(y_vals_metric1) - if m == 0: + if n_mods == 0: y_mod_min = y_vals_metric_min y_mod_max = y_vals_metric_max + n_mods+=1 else: if math.isinf(y_mod_min): y_mod_min = y_vals_metric_min @@ -761,13 +824,13 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, metric2_mean_fmt_string = f'{y_vals_metric2_mean:.2E}' if plot_reference[1]: if not plotted_reference[1]: + ref_color_dict = model_colors.get_color_dict('obs') if connect_points: x_vals2_plot = x_vals2[~np.isnan(reference2)] y_vals2_plot = reference2[~np.isnan(reference2)] else: x_vals2_plot = x_vals2 y_vals2_plot = reference2 - ref_color_dict = model_colors.get_color_dict('obs') plt.plot( x_vals2_plot.tolist(), y_vals2_plot, marker=ref_color_dict['marker'], @@ -870,10 +933,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, x_val for x_val in np.arange(xticks_min, xticks_max+incr, incr) ] xtick_labels = [str(xtick) for xtick in xticks] - if len(xticks) < 48: - show_xtick_every = 1 - else: - show_xtick_every = 2 + show_xtick_every = len(xticks)//40+1 xtick_labels_with_blanks = ['' for item in xtick_labels] for i, item in enumerate(xtick_labels[::int(show_xtick_every)]): xtick_labels_with_blanks[int(show_xtick_every)*i] = item @@ 
-886,6 +946,10 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, for y in [-5,-4,-3,-2,-1,0,1,2,3,4,5] ]).flatten() round_to_nearest_categories = y_range_categories/20. + if math.isinf(y_min): + y_min = y_min_limit + if math.isinf(y_max): + y_max = y_max_limit y_range = y_max-y_min round_to_nearest = round_to_nearest_categories[ np.digitize(y_range, y_range_categories[:-1]) @@ -896,7 +960,24 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, ylim_min = float( np.format_float_scientific(ylim_min, unique=False, precision=3) ) - yticks = np.arange(ylim_min, ylim_max+round_to_nearest, round_to_nearest) + if round_to_nearest < 1.: + y_precision_scale = 100/round_to_nearest + else: + y_precision_scale = 1. + yticks = [ + y_val for y_val + in np.arange( + ylim_min*y_precision_scale, + ylim_max*y_precision_scale+round_to_nearest*y_precision_scale, + round_to_nearest*y_precision_scale + ) + ] + yticks=np.divide(yticks,y_precision_scale) + ytick_labels = [f'{ytick}' for ytick in yticks] + show_ytick_every = len(yticks)//10+1 + ytick_labels_with_blanks = ['' for item in ytick_labels] + for i, item in enumerate(ytick_labels[::int(show_ytick_every)]): + ytick_labels_with_blanks[int(show_ytick_every)*i] = item var_long_name_key = df['FCST_VAR'].tolist()[0] if str(var_long_name_key).upper() == 'HGT': if str(df['OBS_VAR'].tolist()[0]).upper() in ['CEILING']: @@ -904,28 +985,52 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: var_long_name_key = 'HPBL' var_long_name = variable_translator[var_long_name_key] - units = df['FCST_UNITS'].tolist()[0] - if units in reference.unit_conversions: + if unit_convert: if fcst_thresh and '' not in fcst_thresh: fcst_thresh_labels = [float(tlab) for tlab in fcst_thresh_labels] fcst_thresh_labels = ( - reference.unit_conversions[units]['formula'](fcst_thresh_labels) + reference.unit_conversions[units]['formula']( + 
fcst_thresh_labels, + rounding=True + ) ) fcst_thresh_labels = [str(tlab) for tlab in fcst_thresh_labels] + requested_fcst_thresh_labels = [ + float(tlab) for tlab in requested_fcst_thresh_labels + ] + requested_fcst_thresh_labels = ( + reference.unit_conversions[units]['formula']( + requested_fcst_thresh_labels, + rounding=True + ) + ) + requested_fcst_thresh_labels = [ + str(tlab) for tlab in requested_fcst_thresh_labels + ] if obs_thresh and '' not in obs_thresh: obs_thresh_labels = [float(tlab) for tlab in obs_thresh_labels] obs_thresh_labels = ( - reference.unit_conversions[units]['formula'](obs_thresh_labels) + reference.unit_conversions[units]['formula']( + obs_thresh_labels, + rounding=True + ) ) obs_thresh_labels = [str(tlab) for tlab in obs_thresh_labels] + requested_obs_thresh_labels = [ + float(tlab) for tlab in requested_obs_thresh_labels + ] + requested_obs_thresh_labels = ( + reference.unit_conversions[units]['formula']( + requested_obs_thresh_labels, + rounding=True + ) + ) + requested_obs_thresh_labels = [ + str(tlab) for tlab in requested_obs_thresh_labels + ] units = reference.unit_conversions[units]['convert_to'] if units == '-': units = '' - metrics_using_var_units = [ - 'BCRMSE','RMSE','BIAS','ME','FBAR','OBAR','MAE','FBAR_OBAR', - 'SPEED_ERR','DIR_ERR','RMSVE','VDIFF_SPEED','VDIF_DIR', - 'FBAR_OBAR_SPEED','FBAR_OBAR_DIR','FBAR_SPEED','FBAR_DIR' - ] if metric2_name is not None: metric1_string, metric2_string = metric_long_names if (str(metric1_name).upper() in metrics_using_var_units @@ -949,6 +1054,7 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, ax.set_ylabel(ylabel) ax.set_xlabel(xlabel) ax.set_xticklabels(xtick_labels_with_blanks) + ax.set_yticklabels(ytick_labels_with_blanks) ax.set_yticks(yticks) ax.set_xticks(xticks) ax.tick_params( @@ -958,14 +1064,15 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, left=False, labelleft=False, labelright=False, labelbottom=False, labeltop=False, 
which='minor', axis='y', pad=15 ) + majyticks = [i for i, item in enumerate(ytick_labels_with_blanks) if item] + for mt in majyticks: + ax.yaxis.get_major_ticks()[mt].tick1line.set_markersize(8) ax.legend( - handles, labels, loc='upper center', fontsize=15, framealpha=1, - bbox_to_anchor=(0.5, -0.08), ncol=4, frameon=True, numpoints=2, - borderpad=.8, labelspacing=2., columnspacing=3., handlelength=3., - handletextpad=.4, borderaxespad=.5) - fig.subplots_adjust(bottom=.15, wspace=0, hspace=0) - fig.subplots_adjust(top=.85) + handles, labels, framealpha=1, + bbox_to_anchor=(0.5, -.15), ncol=4, frameon=True, numpoints=2, + borderpad=.8, labelspacing=1.) + fig.subplots_adjust(wspace=0, hspace=0) ax.grid( visible=True, which='major', axis='both', alpha=.5, linestyle='--', linewidth=.5, zorder=0 @@ -978,8 +1085,8 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, count = str(int(count)) ax.annotate( f'{count}', xy=(xval,1.), - xycoords=('data','axes fraction'), xytext=(0,18), - textcoords='offset points', va='top', fontsize=16, + xycoords=('data','axes fraction'), xytext=(0,12), + textcoords='offset points', va='top', fontsize=11, color='dimgrey', ha='center' ) ax.annotate( @@ -987,85 +1094,93 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, xytext=(-50, 21), textcoords='offset points', va='top', fontsize=11, color='dimgrey', ha='center' ) - fig.subplots_adjust(top=.85) # Title domain = df['VX_MASK'].tolist()[0] var_savename = df['FCST_VAR'].tolist()[0] + if 'APCP' in var_savename.upper(): + var_savename = 'APCP' + elif any(field in var_savename.upper() for field in ['ASNOW','SNOD']): + var_savename = 'ASNOW' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['HPBL']: + var_savename = 'HPBL' + elif str(df['OBS_VAR'].tolist()[0]).upper() in ['MSLET','MSLMA','PRMSL']: + var_savename = 'MSLET' if domain in list(domain_translator.keys()): - domain_string = domain_translator[domain] + domain_string = 
domain_translator[domain]['long_name'] + domain_save_string = domain_translator[domain]['save_name'] else: domain_string = domain + domain_save_string = domain date_hours_string = plot_util.get_name_for_listed_items( [f'{date_hour:02d}' for date_hour in date_hours], ', ', '', 'Z', 'and ', '' ) - ''' - date_hours_string = ' '.join([ - f'{date_hour:02d}Z,' for date_hour in date_hours - ]) - ''' date_start_string = date_range[0].strftime('%d %b %Y') date_end_string = date_range[1].strftime('%d %b %Y') if str(level).upper() in ['CEILING', 'TOTAL', 'PBL']: if str(level).upper() == 'CEILING': level_string = '' - level_savename = '' + level_savename = 'L0' elif str(level).upper() == 'TOTAL': level_string = 'Total ' - level_savename = '' + level_savename = 'L0' elif str(level).upper() == 'PBL': level_string = '' - level_savename = '' + level_savename = 'L0' elif str(verif_type).lower() in ['pres', 'upper_air', 'raob'] or 'P' in str(level): if 'P' in str(level): if str(level).upper() == 'P90-0': level_string = f'Mixed-Layer ' - level_savename = f'ML' + level_savename = f'L90' else: level_num = level.replace('P', '') level_string = f'{level_num} hPa ' - level_savename = f'{level_num}MB_' + level_savename = f'{level}' elif str(level).upper() == 'L0': level_string = f'Surface-Based ' - level_savename = f'SB' + level_savename = f'{level}' else: level_string = '' - level_savename = '' + level_savename = f'{level}' elif str(verif_type).lower() in ['sfc', 'conus_sfc', 'polar_sfc', 'mrms', 'metar']: if 'Z' in str(level): if str(level).upper() == 'Z0': if str(var_long_name_key).upper() in ['MLSP', 'MSLET', 'MSLMA', 'PRMSL']: level_string = '' - level_savename = '' + level_savename = f'{level}' else: level_string = 'Surface ' - level_savename = 'SFC_' + level_savename = f'{level}' else: level_num = level.replace('Z', '') if var_savename in ['TSOIL', 'SOILW']: level_string = f'{level_num}-cm ' - level_savename = f'{level_num}CM_' + level_savename = f'{level_num}CM' else: level_string 
= f'{level_num}-m ' - level_savename = f'{level_num}M_' - elif 'L' in str(level) or 'A' in str(level): + level_savename = f'{level}' + elif 'L' in str(level): level_string = '' - level_savename = '' + level_savename = f'{level}' + elif 'A' in str(level): + level_num = level.replace('A', '') + level_string = f'{level_num}-hour ' + level_savename = f'A{level_num.zfill(2)}' else: level_string = f'{level} ' - level_savename = f'{level}_' - elif str(verif_type).lower() in ['ccpa']: + level_savename = f'{level}' + elif str(verif_type).lower() in ['ccpa', 'mrms']: if 'A' in str(level): level_num = level.replace('A', '') level_string = f'{level_num}-hour ' - level_savename = f'{level_num}H_' + level_savename = f'A{level_num.zfill(2)}' else: level_string = f'' - level_savename = f'' + level_savename = f'{level}' else: - level_string = f'{level}' - level_savename = f'{level}_' + level_string = f'{level} ' + level_savename = f'{level}' if metric2_name is not None: title1 = f'{metric1_string} and {metric2_string}' else: @@ -1074,24 +1189,37 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, title1+=f' {interp_pts_string}' fcst_thresh_on = (fcst_thresh and '' not in fcst_thresh) obs_thresh_on = (obs_thresh and '' not in obs_thresh) + tail_dot_rgx = re.compile(r'(?:(\.)|(\.\d*?[1-9]\d*?))0+(?=\b|[^0-9])') if fcst_thresh_on: + fcst_thresh_labels = [ + tail_dot_rgx.sub(r'\2', lab) for lab in fcst_thresh_labels + ] fcst_thresholds_phrase = ', '.join([ f'{opt}{fcst_thresh_label}' for fcst_thresh_label in fcst_thresh_labels ]) + requested_fcst_thresh_labels = [ + tail_dot_rgx.sub(r'\2', lab) for lab in requested_fcst_thresh_labels + ] fcst_thresholds_save_phrase = ''.join([ f'{opt_letter}{fcst_thresh_label}' for fcst_thresh_label in requested_fcst_thresh_labels - ]) + ]).replace('.','p') if obs_thresh_on: + obs_thresh_labels = [ + tail_dot_rgx.sub(r'\2', lab) for lab in obs_thresh_labels + ] obs_thresholds_phrase = ', '.join([ f'obs{opt}{obs_thresh_label}' for 
obs_thresh_label in obs_thresh_labels ]) + requested_obs_thresh_labels = [ + tail_dot_rgx.sub(r'\2', lab) for lab in requested_obs_thresh_labels + ] obs_thresholds_save_phrase = ''.join([ f'obs{opt_letter}{obs_thresh_label}' for obs_thresh_label in requested_obs_thresh_labels - ]) + ]).replace('.','p') if fcst_thresh_on: if units: title2 = (f'{level_string}{var_long_name} ({fcst_thresholds_phrase}' @@ -1115,10 +1243,10 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, + f'{date_start_string} to {date_end_string}, {frange_string}') title_center = '\n'.join([title1, title2, title3]) if sample_equalization: - title_pad=40 + title_pad=23 else: title_pad=None - ax.set_title(title_center, loc=plotter.title_loc, pad=title_pad) + ax.set_title(title_center, pad=title_pad) logger.info("... Plotting complete.") # Logos @@ -1170,34 +1298,44 @@ def plot_valid_hour_average(df: pd.DataFrame, logger: logging.Logger, time_period_savename = f'{date_start_savename}-{date_end_savename}' else: time_period_savename = f'{eval_period}' - save_name = (f'valid_hour_average_regional_' - + f'{str(domain).lower()}_{str(date_type).lower()}_' - + f'{str(date_hours_savename).lower()}_' - + f'{str(level_savename).lower()}' - + f'{str(var_savename).lower()}_{str(metric1_name).lower()}') + + plot_info = f'vhrmean' + save_name = (f'{str(metric1_name).lower()}') if metric2_name is not None: save_name+=f'_{str(metric2_name).lower()}' - if interp_pts and '' not in interp_pts: - save_name+=f'_{str(interp_pts_save_string).lower()}' - save_name+=f'_{str(frange_save_string).lower()}' if fcst_thresh_on: save_name+=f'_{str(fcst_thresholds_save_phrase).lower()}' elif obs_thresh_on: save_name+=f'_{str(obs_thresholds_save_phrase).lower()}' + if interp_pts and '' not in interp_pts: + save_name+=f'_{str(interp_pts_save_string).lower()}' + save_name+=f'.{str(var_savename).lower()}' + if level_savename: + save_name+=f'_{str(level_savename).lower()}' + 
save_name+=f'.{str(time_period_savename).lower()}' + save_name+=f'.{plot_info}' + save_name+=f'.{str(domain_save_string).lower()}' + if save_header: - save_name = f'{save_header}_'+save_name + save_name = f'{save_header}.'+save_name save_subdir = os.path.join( save_dir, f'{str(plot_group).lower()}', f'{str(time_period_savename).lower()}' ) if not os.path.isdir(save_subdir): - try: - os.makedirs(save_subdir) - except FileExistsError as e: - logger.warning(f"Several processes are making {save_subdir} at " - + f"the same time. Passing") + os.makedirs(save_subdir) save_path = os.path.join(save_subdir, save_name+'.png') fig.savefig(save_path, dpi=dpi) + if restart_dir: + shutil.copy2( + save_path, + os.path.join( + restart_dir, + f'{str(plot_group).lower()}', + f'{str(time_period_savename).lower()}', + save_name+'.png' + ) + ) logger.info(u"\u2713"+f" plot saved successfully as {save_path}") plt.close(num) logger.info('========================================') @@ -1207,21 +1345,21 @@ def main(): # Logging log_metplus_dir = '/' - for subdir in LOG_METPLUS.split('/')[:-1]: + for subdir in LOG_TEMPLATE.split('/')[:-1]: log_metplus_dir = os.path.join(log_metplus_dir, subdir) if not os.path.isdir(log_metplus_dir): os.makedirs(log_metplus_dir) - logger = logging.getLogger(LOG_METPLUS) + logger = logging.getLogger(LOG_TEMPLATE) logger.setLevel(LOG_LEVEL) formatter = logging.Formatter( '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' + '%(message)s', '%m/%d %H:%M:%S' ) - file_handler = logging.FileHandler(LOG_METPLUS, mode='a') + file_handler = logging.FileHandler(LOG_TEMPLATE, mode='a') file_handler.setFormatter(formatter) logger.addHandler(file_handler) - logger_info = f"Log file: {LOG_METPLUS}" + logger_info = f"Log file: {LOG_TEMPLATE}" print(logger_info) logger.info(logger_info) @@ -1428,17 +1566,25 @@ def main(): logger.warning(e) logger.warning("Continuing ...") plot_group = var_specs['plot_group'] - for l, fcst_level in enumerate(FCST_LEVELS): - if 
len(FCST_LEVELS) != len(OBS_LEVELS): + if FCST_LEVELS in presets.level_presets: + fcst_levels = re.split(r',(?![0*])', presets.level_presets[FCST_LEVELS].replace(' ','')) + else: + fcst_levels = re.split(r',(?![0*])', FCST_LEVELS.replace(' ','')) + if OBS_LEVELS in presets.level_presets: + obs_levels = re.split(r',(?![0*])', presets.level_presets[OBS_LEVELS].replace(' ','')) + else: + obs_levels = re.split(r',(?![0*])', OBS_LEVELS.replace(' ','')) + for l, fcst_level in enumerate(fcst_levels): + if len(fcst_levels) != len(obs_levels): e = ("FATAL ERROR: FCST_LEVELS and OBS_LEVELS must be lists of the same" + f" size") logger.error(e) logger.error("Quitting ...") raise ValueError(e+"\nQuitting ...") - if (FCST_LEVELS[l] not in var_specs['fcst_var_levels'] - or OBS_LEVELS[l] not in var_specs['obs_var_levels']): + if (fcst_levels[l] not in var_specs['fcst_var_levels'] + or obs_levels[l] not in var_specs['obs_var_levels']): e = (f"The requested variable/level combination is not valid: " - + f"{requested_var}/{level}") + + f"{requested_var}/{fcst_level}") logger.warning(e) continue for domain in DOMAINS: @@ -1453,7 +1599,7 @@ def main(): logger, STATS_DIR, PRUNE_DIR, OUTPUT_BASE_TEMPLATE, VERIF_CASE, VERIF_TYPE, LINE_TYPE, DATE_TYPE, date_range, EVAL_PERIOD, date_hours, FLEADS, requested_var, fcst_var_names, obs_var_names, - MODELS, domain, INTERP, MET_VERSION, clear_prune_dir + MODELS, domain, INTERP, INTERP_PNTS, MET_VERSION, clear_prune_dir ) if df is None: continue @@ -1467,7 +1613,8 @@ def main(): y_max_limit=Y_MAX_LIMIT, y_lim_lock=Y_LIM_LOCK, xlabel=xlabel, line_type=LINE_TYPE, verif_type=VERIF_TYPE, date_hours=date_hours, anti_date_hours=anti_date_hours, - eval_period=EVAL_PERIOD, save_dir=SAVE_DIR, + eval_period=EVAL_PERIOD, save_dir=SAVE_DIR, + restart_dir=RESTART_DIR, display_averages=display_averages, save_header=URL_HEADER, plot_group=plot_group, confidence_intervals=CONFIDENCE_INTERVALS, bs_nrep=bs_nrep, @@ -1500,7 +1647,7 @@ def main(): DATE_TYPE = 
check_DATE_TYPE(os.environ['DATE_TYPE']) LINE_TYPE = check_LINE_TYPE(os.environ['LINE_TYPE']) INTERP = check_INTERP(os.environ['INTERP']) - MODELS = check_MODEL(os.environ['MODEL']).replace(' ','').split(',') + MODELS = check_MODELS(os.environ['MODELS']).replace(' ','').split(',') DOMAINS = check_VX_MASK_LIST(os.environ['VX_MASK_LIST']).replace(' ','').split(',') # valid hour (each plot will use all available valid_hours listed below) @@ -1525,8 +1672,8 @@ def main(): FLEADS = check_FCST_LEAD(os.environ['FCST_LEAD']).replace(' ','').split(',') # list of levels - FCST_LEVELS = re.split(r',(?![0*])', check_FCST_LEVEL(os.environ['FCST_LEVEL']).replace(' ','')) - OBS_LEVELS = re.split(r',(?![0*])', check_OBS_LEVEL(os.environ['OBS_LEVEL']).replace(' ','')) + FCST_LEVELS = check_FCST_LEVEL(os.environ['FCST_LEVEL']) + OBS_LEVELS = check_OBS_LEVEL(os.environ['OBS_LEVEL']) FCST_THRESH = check_FCST_THRESH(os.environ['FCST_THRESH'], LINE_TYPE) OBS_THRESH = check_OBS_THRESH(os.environ['OBS_THRESH'], FCST_THRESH, LINE_TYPE).replace(' ','').split(',') @@ -1579,7 +1726,7 @@ def main(): print("\n===================================================================\n") # ============= END USER CONFIGURATIONS ================= - LOG_METPLUS = str(LOG_METPLUS) + LOG_TEMPLATE = str(LOG_TEMPLATE) LOG_LEVEL = str(LOG_LEVEL) MET_VERSION = float(MET_VERSION) VALID_HOURS = [ @@ -1591,8 +1738,6 @@ def main(): FLEADS = [int(flead) for flead in FLEADS] INTERP_PNTS = [str(pts) for pts in INTERP_PNTS] VERIF_CASETYPE = str(VERIF_CASE).lower() + '_' + str(VERIF_TYPE).lower() - FCST_LEVELS = [str(level) for level in FCST_LEVELS] - OBS_LEVELS = [str(level) for level in OBS_LEVELS] CONFIDENCE_INTERVALS = str(CONFIDENCE_INTERVALS).lower() in [ 'true', '1', 't', 'y', 'yes' ] From cfb1011d0a74eed05748b8c6e953650f3d1f93d7 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Fri, 15 Nov 2024 21:45:41 +0000 Subject: [PATCH 02/19] Relocate =evs --- .../cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh | 4 
+--- .../cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh | 4 +--- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh | 4 +--- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh | 4 +--- .../cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh | 4 +--- .../cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh | 4 +--- .../plots/cam/jevs_cam_refs_precip_last31days_plots.sh | 4 +--- .../plots/cam/jevs_cam_refs_precip_last90days_plots.sh | 4 +--- .../scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh | 5 +---- .../plots/cam/jevs_cam_refs_profile_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_profile_last90days_plots.sh | 5 +---- .../plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh | 4 +--- .../plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh | 4 +--- .../plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh | 5 +---- .../plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh | 5 +---- .../scripts/stats/cam/jevs_cam_refs_grid2obs_stats.sh | 2 +- dev/drivers/scripts/stats/cam/jevs_cam_refs_precip_stats.sh | 2 +- .../scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.sh | 2 +- 18 files changed, 18 insertions(+), 50 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh index 495147d3cd..d9afe80893 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh index 
84db6c145a..5819de9014 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh index 0feb436470..0c866b1843 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh index a3dcee2291..4d88804c65 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh index df55c753a6..92f9af1d03 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh +++ 
b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh index eddd3468d8..185a6633e5 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh index 59179262df..21b10aef9b 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh index 405bc44284..190c002d77 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export 
HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh index a0d430606d..7cf7939edf 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh @@ -11,16 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh index 940c5683ec..c96733c810 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh @@ -11,13 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh index b2683671d7..c7751f7547 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh @@ -13,14 +13,11 @@ export OMP_NUM_THREADS=1 export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS +export NET=evs source $HOMEevs/versions/run.ver - - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git 
a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh index 200b1bc2f7..b98ebc70ac 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh index e54e6f79f5..b82f987ba8 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh @@ -11,15 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh index e145b8304f..06a0f687fd 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh @@ -11,16 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh index 
5603e393a4..034e1ab643 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh @@ -11,16 +11,13 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver - - - export envir=prod -export NET=evs export STEP=plots export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/stats/cam/jevs_cam_refs_grid2obs_stats.sh b/dev/drivers/scripts/stats/cam/jevs_cam_refs_grid2obs_stats.sh index 8459c89472..a402d573b8 100755 --- a/dev/drivers/scripts/stats/cam/jevs_cam_refs_grid2obs_stats.sh +++ b/dev/drivers/scripts/stats/cam/jevs_cam_refs_grid2obs_stats.sh @@ -11,10 +11,10 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver -export NET=evs export STEP=stats export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/stats/cam/jevs_cam_refs_precip_stats.sh b/dev/drivers/scripts/stats/cam/jevs_cam_refs_precip_stats.sh index 59ea363aba..3917f9bf0f 100755 --- a/dev/drivers/scripts/stats/cam/jevs_cam_refs_precip_stats.sh +++ b/dev/drivers/scripts/stats/cam/jevs_cam_refs_precip_stats.sh @@ -13,10 +13,10 @@ export OMP_NUM_THREADS=1 ## 3x7 conus(ccpa) + 3x7 alaska(mrms) + 2 snow = 44 jobs +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS source $HOMEevs/versions/run.ver -export NET=evs export STEP=stats export COMPONENT=cam export RUN=atmos diff --git a/dev/drivers/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.sh b/dev/drivers/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.sh index d1098d9c18..38b1606bda 100755 --- a/dev/drivers/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.sh +++ b/dev/drivers/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.sh @@ -11,10 +11,10 @@ set -x export OMP_NUM_THREADS=1 +export NET=evs export HOMEevs=/lfs/h2/emc/vpppg/noscrub/${USER}/EVS 
source $HOMEevs/versions/run.ver -export NET=evs export STEP=stats export COMPONENT=cam export RUN=atmos From 18e0955a116207bde6942021590e5d2a862597d6 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Sun, 17 Nov 2024 01:52:41 +0000 Subject: [PATCH 03/19] Fix parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf --- .../grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf b/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf index 78c19e3421..a43b09298d 100755 --- a/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf +++ b/parm/metplus_config/stats/cam/grid2obs/PointStat_fcstREFSmean_obsPREPBUFR_SPCoutlook.conf @@ -35,7 +35,7 @@ OBS_VAR1_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION; FCST_VAR2_NAME = CAPE FCST_VAR2_LEVELS = P0-90 -FCST_VAR2_OPTIONS = cnt_thresh = [ NA ] ; cnt_logic = INTERSECTION; +FCST_VAR2_OPTIONS = cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; OBS_VAR2_NAME = MLCAPE OBS_VAR2_LEVELS = L0-90000 OBS_VAR2_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION; From 3b1553d6f1ca51086e7538be9477a5d075f6156f Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Mon, 18 Nov 2024 23:29:27 +0000 Subject: [PATCH 04/19] Match ecf and dev scripts --- .../scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh | 2 +- ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf | 2 +- ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf | 2 +- ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_profile_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_profile_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf | 2 +- 
.../plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf | 2 +- ecf/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.ecf | 2 +- scripts/plots/cam/exevs_refs_precip_spatial_plots.sh | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh index 7cf7939edf..a462c07a7b 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=2:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=1:mem=100GB #PBS -l debug=true set -x diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf index c204b1a5f5..4fe2238e76 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=4:ncpus=78:mem=100GB +#PBS -l place=vscatter,select=4:ncpus=78:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf index 1df7c62a8f..a310d2d21d 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=4:ncpus=78:mem=200GB +#PBS -l place=vscatter,select=4:ncpus=78:mem=200GB #PBS -l debug=true export model=evs diff --git 
a/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf index 263020bb6f..6ca3ce39b5 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=1:ncpus=2:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=1:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf index 60ef646483..5718a0c543 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter:shared,select=1:ncpus=60:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=60:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf index eda4ee7efc..d84c9a2afc 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter:shared,select=1:ncpus=60:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=60:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf index 749c89b469..813bf92893 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS 
-A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter:shared,select=1:ncpus=30:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=30:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf index 7eda57b911..367687a7c7 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=1:ncpus=30:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=30:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf index d5e55374f8..484fd4024f 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter:shared,select=1:ncpus=6:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=6:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf index 64717dcb1f..938d17a105 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:20:00 -#PBS -l place=vscatter:shared,select=1:ncpus=6:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=6:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.ecf 
b/ecf/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.ecf index 03a7521755..b0e302d753 100755 --- a/ecf/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.ecf +++ b/ecf/scripts/stats/cam/jevs_cam_refs_spcoutlook_stats.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:45:00 -#PBS -l place=vscatter:shared,select=1:ncpus=2:mem=25GB +#PBS -l place=vscatter,select=1:ncpus=2:mem=25GB #PBS -l debug=true export model=evs diff --git a/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh b/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh index 9a38870d1f..18e9281dfb 100755 --- a/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh +++ b/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/ksh #******************************************************************************* # Purpose: setup environment, paths, and run the refs precip spatial map plotting # python script From 23626cfc4b66521eaf0d2b48cd32b752109f80fc Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 20 Nov 2024 20:50:17 +0000 Subject: [PATCH 05/19] Update plot output file naming --- scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh | 4 ++-- scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh | 4 ++-- scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh | 4 ++-- scripts/plots/cam/exevs_refs_precip_plots.sh | 4 ++-- scripts/plots/cam/exevs_refs_precip_spatial_plots.sh | 6 +++--- scripts/plots/cam/exevs_refs_profile_plots.sh | 4 ++-- scripts/plots/cam/exevs_refs_spcoutlook_plots.sh | 4 ++-- 7 files changed, 15 insertions(+), 15 deletions(-) diff --git a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh index e2c050210e..0250339f2b 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh @@ -323,12 +323,12 @@ for score_type in lead_average threshold_average; do for thresh in ge250 ge500 ge1000 ge2000 ; do if [ -s 
${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}_${thresh}.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}_${thresh}.png evs.refs.${stat}.${var}_${level}.${thresh}.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}_${thresh}.png evs.refs.${stat}.${var}_${level}.${thresh}.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi done else if [ -s ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}_${lead}.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}_${lead}.png evs.refs.${stat}.${var}_${level}.last${last_days}days.${scoretype}_valid_${valid}.${new_lead}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}_${lead}.png evs.refs.${stat}.${var}_${level}.last${last_days}days.${scoretype}_valid${valid}_${new_lead}.${new_domain}.png fi fi diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh index 016a932845..1991297050 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh @@ -342,7 +342,7 @@ for valid in 00z 03z 06z 09z 12z 15z 18z 21z ; do fi if [ -s performance_diagram_regional_${domain}_valid_${valid}_${var}_*.png ] ; then - mv performance_diagram_regional_${domain}_valid_${valid}_${var}_*.png evs.refs.ctc.${var_new}_${level}.last${last_days}days.perfdiag_valid_${valid}.${new_domain}.png + mv performance_diagram_regional_${domain}_valid_${valid}_${var}_*.png evs.refs.ctc.${var_new}_${level}.last${last_days}days.perfdiag_valid${valid}.${new_domain}.png fi done @@ -385,7 +385,7 @@ for valid in 00z 03z 06z 09z 12z 15z 18z 21z ; do fi if [ -s ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}*.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}*.png 
evs.refs.${stat}.${var_new}_${level}.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stat}*.png evs.refs.${stat}.${var_new}_${level}.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi done #domain diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh index 814b5eaa77..2b688c5f0a 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh @@ -269,11 +269,11 @@ for stats in rmse_spread ; do if [ $var = mslet ] || [ $var = gust ] || [ $var = hpbl ] ; then if [ -s ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi else if [ -s ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi fi diff --git a/scripts/plots/cam/exevs_refs_precip_plots.sh b/scripts/plots/cam/exevs_refs_precip_plots.sh index 1033c124fd..bc7e8b798e 100755 --- a/scripts/plots/cam/exevs_refs_precip_plots.sh +++ b/scripts/plots/cam/exevs_refs_precip_plots.sh @@ -288,7 +288,7 @@ for stats in ets fbias fss ; do if [ -s ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}_${lead}.png ] ; then ls 
${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}_${lead}.png - mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var}h.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var}h.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi done done @@ -327,7 +327,7 @@ for var in apcp_01 apcp_03 apcp_24 ; do fi if [ -s ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${lead}.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${lead}.png evs.refs.ctc.${var}h.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${lead}.png evs.refs.ctc.${var}h.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi done diff --git a/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh b/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh index 18e9281dfb..1b1aeb0b1e 100755 --- a/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh +++ b/scripts/plots/cam/exevs_refs_precip_spatial_plots.sh @@ -77,9 +77,9 @@ export err=$?; err_chk cd $DATA/grid2grid_plots/plot_output/atmos.${VDATE}/precip/SL1L2_FBAR_24hrAccumMaps_CONUS_precip_spatial_map/images -if [ -s *.gif ] ; than - tar -cvf evs.plots.refs.precip.spatial.map.v${VDATE}.tar *.gif -fi +if [ -s refs*.gif ] ; then + tar -cvf evs.plots.refs.precip.spatial.map.v${VDATE}.tar *.gif +fi # Cat the plotting log files log_dirs="$DATA/*/*/*/logs" diff --git a/scripts/plots/cam/exevs_refs_profile_plots.sh b/scripts/plots/cam/exevs_refs_profile_plots.sh index 23457ccf80..ce8e406c79 100755 --- a/scripts/plots/cam/exevs_refs_profile_plots.sh +++ b/scripts/plots/cam/exevs_refs_profile_plots.sh @@ -339,11 +339,11 @@ for valid in 00z 12z ; do if [ ${score_type} = lead_average ] ; then if [ -s 
${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}_${end} ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}_${end} evs.refs.${stats}.${var_new}.last${last_days}days.${scoretype}_valid_${valid}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}_${end} evs.refs.${stats}.${var_new}.last${last_days}days.${scoretype}_valid${valid}.${new_domain}.png fi else if [ -s ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}_${lead}.png ] ; then - mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var_new}.last${last_days}days.${scoretype}_valid_${valid}_${new_lead}.${new_domain}.png + mv ${score_type}_regional_${domain}_valid_${valid}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var_new}.last${last_days}days.${scoretype}_valid${valid}_${new_lead}.${new_domain}.png fi fi done #lead diff --git a/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh b/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh index e8b2fdef1a..8e801e33d0 100755 --- a/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh +++ b/scripts/plots/cam/exevs_refs_spcoutlook_plots.sh @@ -229,11 +229,11 @@ for domain in day1_mrgl day1_slgt day1_tstm day1_enh day1_mdt day1_high day2_mrg if [ $var = cape ] ; then var_new=cape level=l0 - valid=valid_00z_12z + valid=valid00z12z elif [ $var = mlcape ] ; then var_new=mlcape level=ml - valid=valid_00z_12z + valid=valid00z12z fi for all_times in 00z 12z 00z_12z ; do if ls lead_average_regional_${domain}_valid_${all_times}_${var}*.png 1> /dev/null 2>&1; then From 7fb9e6e020b117f2aa12ae59b992c400c417e113 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 20 Nov 2024 21:11:03 +0000 Subject: [PATCH 06/19] Update EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf --- .../cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf b/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf index ba2329113c..79e7ebd212 100755 --- a/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf +++ b/parm/metplus_config/stats/cam/grid2obs/EnsembleStat_fcstREFS_obsPREPBUFR_SFC.conf @@ -63,7 +63,7 @@ OBS_VAR6_LEVELS = {FCST_VAR6_LEVELS} FCST_VAR7_NAME = CAPE FCST_VAR7_LEVELS = L0 -FCST_VAR7_OPTIONS = GRIB_lvl_typ = 1 +FCST_VAR7_OPTIONS = GRIB_lvl_typ = 1; cnt_thresh = [ NA ]; cnt_logic = INTERSECTION; OBS_VAR7_NAME = {FCST_VAR7_NAME} OBS_VAR7_LEVELS = {FCST_VAR7_LEVELS} OBS_VAR7_OPTIONS = cnt_thresh = [ >0 ]; cnt_logic = INTERSECTION From 768b3afd90ebbc2b4c7425a8fc7c2564eddfc451 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Thu, 21 Nov 2024 18:27:35 +0000 Subject: [PATCH 07/19] Update exevs_refs_snowfall_plots.sh --- scripts/plots/cam/exevs_refs_snowfall_plots.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/plots/cam/exevs_refs_snowfall_plots.sh b/scripts/plots/cam/exevs_refs_snowfall_plots.sh index 880be95e8e..e5d99905f5 100755 --- a/scripts/plots/cam/exevs_refs_snowfall_plots.sh +++ b/scripts/plots/cam/exevs_refs_snowfall_plots.sh @@ -251,7 +251,7 @@ for stats in ets fbias fss ; do for domain in conus conus_east conus_west conus_south conus_central ; do if [ -s ${score_type}_regional_${domain}_${valid}_${level}_${var}_${stats}_${lead}.png ] ; then - mv ${score_type}_regional_${domain}_${valid}_${level}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid_all_times.buk_${domain}.png + mv ${score_type}_regional_${domain}_${valid}_${level}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid00z12z.buk_${domain}.png fi done @@ -275,7 +275,7 @@ for var in weasd ; do for domain in conus conus_east conus_west conus_south conus_central ; do if 
[ -s ${score_type}_regional_${domain}_${valid}_${level}_${var}_${lead}.png ] ; then - mv ${score_type}_regional_${domain}_${valid}_${level}_${var}_${lead}.png evs.refs.ctc.${var}_${level}.last${last_days}days.${scoretype}_valid_all_times.buk_${domain}.png + mv ${score_type}_regional_${domain}_${valid}_${level}_${var}_${lead}.png evs.refs.ctc.${var}_${level}.last${last_days}days.${scoretype}_valid00z12z.buk_${domain}.png fi done done From 9e14bc399e375b8fcd07300be9d1ea16e3c999dc Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Tue, 3 Dec 2024 22:54:04 +0000 Subject: [PATCH 08/19] update spatial map scripts --- ush/cam/ush_refs_plot_py/refs_atmos_plots.py | 34 ++----------- .../refs_atmos_plots_precip_spatial_map.py | 5 +- .../refs_atmos_plots_specs.py | 2 +- ush/cam/ush_refs_plot_py/refs_atmos_util.py | 50 +++---------------- 4 files changed, 16 insertions(+), 75 deletions(-) mode change 100755 => 100644 ush/cam/ush_refs_plot_py/refs_atmos_plots.py mode change 100755 => 100644 ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py mode change 100755 => 100644 ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py mode change 100755 => 100644 ush/cam/ush_refs_plot_py/refs_atmos_util.py diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py old mode 100755 new mode 100644 index e731380f8d..e5a5f9c831 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py @@ -106,10 +106,10 @@ logger.info(logger_info) if len(model_list) > 10: - logger.error("TOO MANY MODELS LISTED ("+str(len(model_list)) + logger.error("FATAL ERROR: TOO MANY MODELS LISTED ("+str(len(model_list)) +", ["+', '.join(model_list)+"]), maximum is 10") sys.exit(1) - +''' # Condense .stat files logger.info("Condensing model .stat files for job") for model_idx in range(len(model_list)): @@ -119,10 +119,12 @@ +'.stat') if VERIF_CASE == 'grid2grid' and VERIF_TYPE == 'pres_levs': obs_name = 
truth_name_list[model_idx] + gda_util.condense_model_stat_files(logger, stat_base_dir, condensed_model_stat_file, model, obs_name, grid, vx_mask, fcst_var_name, obs_var_name, line_type) +''' # Set up model information dictionary original_model_info_dict = {} @@ -182,7 +184,7 @@ for v in range(len(fcst_var_prod)): var_info.append((fcst_var_prod[v], obs_var_prod[v])) else: - logger.error("FORECAST AND OBSERVATION VARIABLE INFORMATION NOT THE " + logger.error("FATAL ERROR: FORECAST AND OBSERVATION VARIABLE INFORMATION NOT THE " +"SAME LENGTH") sys.exit(1) @@ -587,30 +589,4 @@ else: logger.warning(plot+" not recongized") -# Create tar file of jobs plots and move to main image directory -#job_output_image_dir = os.path.join(job_output_dir, 'images') -#cwd = os.getcwd() -#if len(glob.glob(job_output_image_dir+'/*')) != 0: -# os.chdir(job_output_image_dir) -# tar_file = os.path.join(job_output_image_dir, -# job_name.replace('/','_')+'.tar') -# if os.path.exists(tar_file): -# os.remove(tar_file) -# logger.debug("Make tar file "+tar_file+" from "+job_output_image_dir) -# gda_util.run_shell_command( -# ['tar', '-cvf', tar_file, '*'] -# ) -# logger.debug(f"Moving {tar_file} to {VERIF_TYPE_image_dir}") -# gda_util.run_shell_command( -# ['mv', tar_file, VERIF_TYPE_image_dir+'/.'] -# ) -# os.chdir(cwd) -#else: -# logger.warning(f"No images generated in {job_output_image_dir}") - -# Clean up -#if evs_run_mode == 'production': -# logger.info(f"Removing {job_output_dir}") -# shutil.rmtree(job_output_dir) - print("END: "+os.path.basename(__file__)) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py old mode 100755 new mode 100644 index bbe3cd6047..82bd7a1763 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py @@ -102,7 +102,6 @@ def make_precip_spatial_map(self): 'goldenrod', 'yellow'] cmap_over_color_mm = 
'#ffaeb9' # Set Cartopy shapefile location - #config['data_dir'] = f"{os.environ['cartopyDataDir']}" config['data_dir'] = config['repo_data_dir'] # Read in data self.logger.info(f"Reading in model files from {self.input_dir}") @@ -184,13 +183,13 @@ def make_precip_spatial_map(self): file_valid_time, '%Y%m%d_%H%M%S' ) if valid_date_dt != file_valid_time_dt: - self.logger.error(f"FILE VALID TIME {file_valid_time_dt} " + self.logger.error(f"FATAL ERROR: FILE VALID TIME {file_valid_time_dt} " +"DOES NOT MATCH EXPECTED VALID TIME " +f"{valid_date_dt}") sys.exit(1) if model_num != 'obs': if init_date_dt != file_init_time_dt: - self.logger.error(f"FILE INIT TIME {file_init_time_dt} " + self.logger.error(f"FATAL ERROR: FILE INIT TIME {file_init_time_dt} " +"DOES NOT MATCH EXPECTED INIT TIME " +f"{init_date_dt}") sys.exit(1) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py old mode 100755 new mode 100644 index 59233016d3..2b7fe3c0af --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py @@ -123,7 +123,7 @@ def __init__(self, logger, plot_type): self.legend_font_size = 16 self.fig_title_size = 18 else: - self.logger.error(f"{self.plot_type} NOT RECOGNIZED") + self.logger.error(f"FATAL ERROR: {self.plot_type} NOT RECOGNIZED") sys.exit(1) def set_up_plot(self): diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_util.py b/ush/cam/ush_refs_plot_py/refs_atmos_util.py old mode 100755 new mode 100644 index 0295a7bd02..20ebb5ca77 --- a/ush/cam/ush_refs_plot_py/refs_atmos_util.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_util.py @@ -29,7 +29,7 @@ def run_shell_command(command): else: run_command = subprocess.run(command) if run_command.returncode != 0: - print("ERROR: "+' '.join(run_command.args)+" gave return code " + print("FATAL ERROR: "+' '.join(run_command.args)+" gave return code " +str(run_command.returncode)) def metplus_command(conf_file_name): @@ 
-51,7 +51,7 @@ def metplus_command(conf_file_name): os.environ['RUN']+'_'+os.environ['VERIF_CASE'], conf_file_name) if not os.path.exists(conf_file): - print("ERROR: "+conf_file+" DOES NOT EXIST") + print("FATAL ERROR: "+conf_file+" DOES NOT EXIST") sys.exit(1) metplus_cmd = run_metplus+' -c '+machine_conf+' -c '+conf_file return metplus_cmd @@ -70,7 +70,7 @@ def python_command(python_script_name, script_arg_list): python_script = os.path.join(os.environ['USHevs'], os.environ['COMPONENT'], python_script_name) if not os.path.exists(python_script): - print("ERROR: "+python_script+" DOES NOT EXIST") + print("FATAL ERROR: "+python_script+" DOES NOT EXIST") sys.exit(1) python_cmd = 'python '+python_script for script_arg in script_arg_list: @@ -727,7 +727,6 @@ def prep_prod_dwd_file(source_file, dest_file, forecast_hour, prep_method): # Working file names prepped_file = os.path.join(os.getcwd(), 'atmos.'+dest_file.rpartition('/')[2]) - #working_file1 = prepped_file+'.tmp1' #### For DWD to run through pcpconform, file name must be #### dwd_YYYYMMDDHH_(hhh)_(hhh).tmp working_file1 = os.path.join(os.getcwd(), @@ -872,37 +871,6 @@ def prep_prod_osi_saf_file(daily_source_file_format, daily_dest_file, merged_var[:] = merged_var_vals merged_data.close() copy_file(daily_prepped_file, daily_dest_file) - # Prep weekly file - #for weekly_source_file in weekly_source_file_list: - # if not os.path.exists(weekly_source_file): - # print(f"WARNING: {weekly_source_file} does not exist, " - # +"not using in weekly average file") - # weekly_source_file_list.remove(weekly_source_file) - #if len(weekly_source_file_list) == 7: - # ncea_cmd_list = ['ncea'] - # for weekly_source_file in weekly_source_file_list: - # ncea_cmd_list.append(weekly_source_file) - # ncea_cmd_list.append('-o') - # ncea_cmd_list.append(weekly_prepped_file) - # run_shell_command(ncea_cmd_list) - # if check_file_exists_size(weekly_prepped_file): - # weekly_data = netcdf.Dataset(weekly_prepped_file, 'a', - # 
format='NETCDF3_CLASSIC') - # weekly_data.setncattr( - # 'start_date', weekly_dates[0].strftime('%Y-%m-%d %H:%M:%S') - # ) - # osi_saf_date_since_dt = datetime.datetime.strptime( - # '1978-01-01 00:00:00','%Y-%m-%d %H:%M:%S' - # ) - # weekly_data.variables['time_bnds'][:] = [ - # (weekly_dates[0] - osi_saf_date_since_dt).total_seconds(), - # weekly_data.variables['time_bnds'][:][0][1] - # ] - # weekly_data.close() - #else: - # print("Not enough files to make "+weekly_prepped_file - # +": "+' '.join(weekly_source_file_list)) - #copy_file(weekly_prepped_file, weekly_dest_file) def prep_prod_ghrsst_ospo_file(source_file, dest_file, date_dt): """! Do prep work for GHRSST OSPO production files @@ -1538,7 +1506,7 @@ def get_obs_valid_hrs(obs): valid_hr_end = obs_valid_hr_dict[obs]['valid_hr_end'] valid_hr_inc = obs_valid_hr_dict[obs]['valid_hr_inc'] else: - print(f"ERROR: Cannot get {obs} valid hour information") + print(f"FATAL ERROR: Cannot get {obs} valid hour information") sys.exit(1) return valid_hr_start, valid_hr_end, valid_hr_inc @@ -1807,7 +1775,7 @@ def get_met_line_type_cols(logger, met_root, met_version, met_line_type): line_type_cols = line.split(' : ')[-1] break else: - logger.error(f"{met_minor_version_col_file} DOES NOT EXISTS, " + logger.error(f"FATAL ERROR: {met_minor_version_col_file} DOES NOT EXISTS, " +"cannot determine MET data column structure") sys.exit(1) met_version_line_type_col_list = ( @@ -1879,10 +1847,8 @@ def condense_model_stat_files(logger, input_dir, output_file, model, obs, ) for model_stat_file in model_stat_files: logger.debug(f"Getting data from {model_stat_file}") - grep = subprocess.run( - 'grep -R "'+model+' " '+model_stat_file+grep_opts, - shell=True, capture_output=True, encoding="utf8" - ) + grep = subprocess.run('grep -R "'+model+'" '+model_stat_file+grep_opts, + shell=True, capture_output=True, encoding="utf8") logger.debug(f"Ran {ps.args}") all_grep_output = all_grep_output+grep.stdout @@ -2506,7 +2472,7 @@ def 
calculate_stat(logger, data_df, line_type, stat): if line_type == 'CTC': stat_df = 1 - (FY_ON/(FY_ON + FY_OY)) else: - logger.error(stat+" IS NOT AN OPTION") + logger.error("FATAL ERROR: "+stat+" IS NOT AN OPTION") sys.exit(1) idx = 0 idx_dict = {} From 0b9745775b278d4fffaef8f9e675e2cb507ef07b Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Tue, 3 Dec 2024 23:25:34 +0000 Subject: [PATCH 09/19] Update spatial map scripts again --- ush/cam/ush_refs_plot_py/refs_atmos_plots.py | 4 ++-- .../refs_atmos_plots_precip_spatial_map.py | 4 ++-- ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py | 2 +- ush/cam/ush_refs_plot_py/refs_atmos_util.py | 12 ++++++------ 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py index e5a5f9c831..a5f8bd422a 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py @@ -106,7 +106,7 @@ logger.info(logger_info) if len(model_list) > 10: - logger.error("FATAL ERROR: TOO MANY MODELS LISTED ("+str(len(model_list)) + logger.error("TOO MANY MODELS LISTED ("+str(len(model_list)) +", ["+', '.join(model_list)+"]), maximum is 10") sys.exit(1) ''' @@ -184,7 +184,7 @@ for v in range(len(fcst_var_prod)): var_info.append((fcst_var_prod[v], obs_var_prod[v])) else: - logger.error("FATAL ERROR: FORECAST AND OBSERVATION VARIABLE INFORMATION NOT THE " + logger.error("FORECAST AND OBSERVATION VARIABLE INFORMATION NOT THE " +"SAME LENGTH") sys.exit(1) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py index 82bd7a1763..184cfd7c4e 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py @@ -183,13 +183,13 @@ def make_precip_spatial_map(self): file_valid_time, '%Y%m%d_%H%M%S' ) if valid_date_dt != file_valid_time_dt: - 
self.logger.error(f"FATAL ERROR: FILE VALID TIME {file_valid_time_dt} " + self.logger.error(f"FILE VALID TIME {file_valid_time_dt} " +"DOES NOT MATCH EXPECTED VALID TIME " +f"{valid_date_dt}") sys.exit(1) if model_num != 'obs': if init_date_dt != file_init_time_dt: - self.logger.error(f"FATAL ERROR: FILE INIT TIME {file_init_time_dt} " + self.logger.error(f"FILE INIT TIME {file_init_time_dt} " +"DOES NOT MATCH EXPECTED INIT TIME " +f"{init_date_dt}") sys.exit(1) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py index 2b7fe3c0af..59233016d3 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py @@ -123,7 +123,7 @@ def __init__(self, logger, plot_type): self.legend_font_size = 16 self.fig_title_size = 18 else: - self.logger.error(f"FATAL ERROR: {self.plot_type} NOT RECOGNIZED") + self.logger.error(f"{self.plot_type} NOT RECOGNIZED") sys.exit(1) def set_up_plot(self): diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_util.py b/ush/cam/ush_refs_plot_py/refs_atmos_util.py index 20ebb5ca77..1af5c2a36a 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_util.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_util.py @@ -29,7 +29,7 @@ def run_shell_command(command): else: run_command = subprocess.run(command) if run_command.returncode != 0: - print("FATAL ERROR: "+' '.join(run_command.args)+" gave return code " + print("ERROR: "+' '.join(run_command.args)+" gave return code " +str(run_command.returncode)) def metplus_command(conf_file_name): @@ -51,7 +51,7 @@ def metplus_command(conf_file_name): os.environ['RUN']+'_'+os.environ['VERIF_CASE'], conf_file_name) if not os.path.exists(conf_file): - print("FATAL ERROR: "+conf_file+" DOES NOT EXIST") + print("ERROR: "+conf_file+" DOES NOT EXIST") sys.exit(1) metplus_cmd = run_metplus+' -c '+machine_conf+' -c '+conf_file return metplus_cmd @@ -70,7 +70,7 @@ def python_command(python_script_name, 
script_arg_list): python_script = os.path.join(os.environ['USHevs'], os.environ['COMPONENT'], python_script_name) if not os.path.exists(python_script): - print("FATAL ERROR: "+python_script+" DOES NOT EXIST") + print("ERROR: "+python_script+" DOES NOT EXIST") sys.exit(1) python_cmd = 'python '+python_script for script_arg in script_arg_list: @@ -1506,7 +1506,7 @@ def get_obs_valid_hrs(obs): valid_hr_end = obs_valid_hr_dict[obs]['valid_hr_end'] valid_hr_inc = obs_valid_hr_dict[obs]['valid_hr_inc'] else: - print(f"FATAL ERROR: Cannot get {obs} valid hour information") + print(f"ERROR: Cannot get {obs} valid hour information") sys.exit(1) return valid_hr_start, valid_hr_end, valid_hr_inc @@ -1775,7 +1775,7 @@ def get_met_line_type_cols(logger, met_root, met_version, met_line_type): line_type_cols = line.split(' : ')[-1] break else: - logger.error(f"FATAL ERROR: {met_minor_version_col_file} DOES NOT EXISTS, " + logger.error(f"{met_minor_version_col_file} DOES NOT EXISTS, " +"cannot determine MET data column structure") sys.exit(1) met_version_line_type_col_list = ( @@ -2472,7 +2472,7 @@ def calculate_stat(logger, data_df, line_type, stat): if line_type == 'CTC': stat_df = 1 - (FY_ON/(FY_ON + FY_OY)) else: - logger.error("FATAL ERROR: "+stat+" IS NOT AN OPTION") + logger.error(stat+" IS NOT AN OPTION") sys.exit(1) idx = 0 idx_dict = {} From 6b21719289b9aa3902d4193d5042792cabec432f Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 4 Dec 2024 14:30:12 +0000 Subject: [PATCH 10/19] Update ush/cam/ush_refs_plot_py/refs_atmos_plots.py --- ush/cam/ush_refs_plot_py/refs_atmos_plots.py | 377 +------------------ 1 file changed, 1 insertion(+), 376 deletions(-) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py index a5f8bd422a..a163bcc96d 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py @@ -109,22 +109,6 @@ logger.error("TOO MANY MODELS LISTED 
("+str(len(model_list)) +", ["+', '.join(model_list)+"]), maximum is 10") sys.exit(1) -''' -# Condense .stat files -logger.info("Condensing model .stat files for job") -for model_idx in range(len(model_list)): - model = model_list[model_idx] - condensed_model_stat_file = os.path.join(job_output_dir, 'model' - +str(model_idx+1)+'_'+model - +'.stat') - if VERIF_CASE == 'grid2grid' and VERIF_TYPE == 'pres_levs': - obs_name = truth_name_list[model_idx] - - gda_util.condense_model_stat_files(logger, stat_base_dir, - condensed_model_stat_file, model, - obs_name, grid, vx_mask, fcst_var_name, - obs_var_name, line_type) -''' # Set up model information dictionary original_model_info_dict = {} @@ -201,325 +185,7 @@ date_info_dict = original_date_info_dict.copy() plot_info_dict = original_plot_info_dict.copy() met_info_dict = original_met_info_dict.copy() - if plot == 'time_series': - import refs_atmos_plots_time_series as gdap_ts - for ts_info in \ - list(itertools.product(valid_hrs, fhrs, var_info, - interp_points_list)): - date_info_dict['valid_hr_start'] = str(ts_info[0]) - date_info_dict['valid_hr_end'] = str(ts_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hour'] = str(ts_info[1]) - plot_info_dict['fcst_var_name'] = ts_info[2][0][0] - plot_info_dict['fcst_var_level'] = ts_info[2][0][1] - plot_info_dict['fcst_var_thresh'] = ts_info[2][0][2] - plot_info_dict['obs_var_name'] = ts_info[2][1][0] - plot_info_dict['obs_var_level'] = ts_info[2][1][1] - plot_info_dict['obs_var_thresh'] = ts_info[2][1][2] - plot_info_dict['interp_points'] = str(ts_info[3]) - init_hr = gda_util.get_init_hour( - int(date_info_dict['valid_hr_start']), - int(date_info_dict['forecast_hour']) - ) - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if init_hr in init_hrs: - if not os.path.exists(image_name): - make_ts = True - else: - make_ts = False - else: - make_ts = False - if make_ts: - plot_ts = 
gdap_ts.TimeSeries(logger, job_output_dir, - job_output_dir, model_info_dict, - date_info_dict, plot_info_dict, - met_info_dict, logo_dir) - plot_ts.make_time_series() - elif plot == 'lead_average': - import refs_atmos_plots_lead_average as gdap_la - for la_info in \ - list(itertools.product(valid_hrs, var_info, - interp_points_list)): - date_info_dict['valid_hr_start'] = str(la_info[0]) - date_info_dict['valid_hr_end'] = str(la_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hours'] = fhrs - plot_info_dict['fcst_var_name'] = la_info[1][0][0] - plot_info_dict['fcst_var_level'] = la_info[1][0][1] - plot_info_dict['fcst_var_thresh'] = la_info[1][0][2] - plot_info_dict['obs_var_name'] = la_info[1][1][0] - plot_info_dict['obs_var_level'] = la_info[1][1][1] - plot_info_dict['obs_var_thresh'] = la_info[1][1][2] - plot_info_dict['interp_points'] = str(la_info[2]) - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if not os.path.exists(image_name): - if len(date_info_dict['forecast_hours']) <= 1: - logger.warning("No span of forecast hours to plot, " - +"given 1 forecast hour, skipping " - +"lead_average plots") - make_la = False - else: - if plot_info_dict['stat'] == 'FBAR_OBAR': - make_la = False - else: - make_la = True - else: - make_la = False - if make_la: - plot_la = gdap_la.LeadAverage(logger, job_output_dir, - job_output_dir, model_info_dict, - date_info_dict, plot_info_dict, - met_info_dict, logo_dir) - plot_la.make_lead_average() - elif plot == 'valid_hour_average': - import refs_atmos_plots_valid_hour_average as gdap_vha - for vha_info in \ - list(itertools.product(var_info, interp_points_list)): - date_info_dict['valid_hr_start'] = valid_hr_start - date_info_dict['valid_hr_end'] = valid_hr_end - date_info_dict['valid_hr_inc'] = valid_hr_inc - date_info_dict['forecast_hours'] = fhrs - plot_info_dict['fcst_var_name'] = vha_info[0][0][0] - 
plot_info_dict['fcst_var_level'] = vha_info[0][0][1] - plot_info_dict['fcst_var_thresh'] = vha_info[0][0][2] - plot_info_dict['obs_var_name'] = vha_info[0][1][0] - plot_info_dict['obs_var_level'] = vha_info[0][1][1] - plot_info_dict['obs_var_thresh'] = vha_info[0][1][2] - plot_info_dict['interp_points'] = str(vha_info[1]) - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if not os.path.exists(image_name): - if date_info_dict['valid_hr_start'] \ - == date_info_dict['valid_hr_end']: - logger.warning("No span of valid hours to plot, " - +"valid start hour is the same as " - +"valid end hour, skipping " - +"valid_hour_average plots") - make_vha = False - else: - if plot_info_dict['stat'] == 'FBAR_OBAR': - make_vha = False - else: - make_vha = True - else: - make_vha = False - if make_vha: - plot_vha = gdap_vha.ValidHourAverage(logger, job_output_dir, - job_output_dir, - model_info_dict, - date_info_dict, - plot_info_dict, - met_info_dict, logo_dir) - plot_vha.make_valid_hour_average() - elif plot == 'threshold_average': - import refs_atmos_plots_threshold_average as gdap_ta - for ta_info in \ - list(itertools.product(valid_hrs, fhrs, interp_points_list)): - date_info_dict['valid_hr_start'] = str(ta_info[0]) - date_info_dict['valid_hr_end'] = str(ta_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hour'] = str(ta_info[1]) - plot_info_dict['fcst_var_name'] = fcst_var_name - plot_info_dict['obs_var_name'] = obs_var_name - plot_info_dict['fcst_var_threshs'] = fcst_var_thresh_list - plot_info_dict['obs_var_name'] = obs_var_name - plot_info_dict['obs_var_threshs'] = obs_var_thresh_list - plot_info_dict['interp_points'] = str(ta_info[2]) - init_hr = gda_util.get_init_hour( - int(date_info_dict['valid_hr_start']), - int(date_info_dict['forecast_hour']) - ) - for l in range(len(fcst_var_level_list)): - plot_info_dict['fcst_var_level'] = fcst_var_level_list[l] - 
plot_info_dict['obs_var_level'] = obs_var_level_list[l] - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if init_hr in init_hrs: - if not os.path.exists(image_name): - if len(plot_info_dict['fcst_var_threshs']) <= 1: - logger.warning("No span of thresholds to plot, " - +"given 1 threshold, skipping " - +"threshold_average plots") - make_ta = False - else: - if plot_info_dict['stat'] == 'FBAR_OBAR': - make_ta = False - else: - make_ta = True - else: - make_ta = False - else: - make_ta = False - if make_ta: - plot_ta = gdap_ta.ThresholdAverage(logger, job_output_dir, - job_output_dir, - model_info_dict, - date_info_dict, - plot_info_dict, - met_info_dict, - logo_dir) - plot_ta.make_threshold_average() - elif plot == 'lead_by_date': - import refs_atmos_plots_lead_by_date as gdap_lbd - for lbd_info in \ - list(itertools.product(valid_hrs, var_info, - interp_points_list)): - date_info_dict['valid_hr_start'] = str(lbd_info[0]) - date_info_dict['valid_hr_end'] = str(lbd_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hours'] = fhrs - plot_info_dict['fcst_var_name'] = lbd_info[1][0][0] - plot_info_dict['fcst_var_level'] = lbd_info[1][0][1] - plot_info_dict['fcst_var_thresh'] = lbd_info[1][0][2] - plot_info_dict['obs_var_name'] = lbd_info[1][1][0] - plot_info_dict['obs_var_level'] = lbd_info[1][1][1] - plot_info_dict['obs_var_thresh'] = lbd_info[1][1][2] - plot_info_dict['interp_points'] = str(lbd_info[2]) - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if not os.path.exists(image_name): - if len(date_info_dict['forecast_hours']) <= 1: - logger.warning("No span of forecast hours to plot, " - +"given 1 forecast hour, skipping " - +"lead_by_date plots") - make_lbd = False - else: - if plot_info_dict['stat'] == 'FBAR_OBAR': - make_lbd = False - else: - make_lbd = True - else: - make_lbd = False - if 
make_lbd: - plot_lbd = gdap_lbd.LeadByDate(logger, job_output_dir, - job_output_dir, model_info_dict, - date_info_dict, plot_info_dict, - met_info_dict, logo_dir) - plot_lbd.make_lead_by_date() - elif plot == 'stat_by_level': - import refs_atmos_plots_stat_by_level as gdap_sbl - vert_profiles = ['all', 'trop', 'strat', 'ltrop', 'utrop'] - for sbl_info in \ - list(itertools.product(valid_hrs, fhrs, interp_points_list, - vert_profiles)): - date_info_dict['valid_hr_start'] = str(sbl_info[0]) - date_info_dict['valid_hr_end'] = str(sbl_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hour'] = str(sbl_info[1]) - plot_info_dict['fcst_var_name'] = fcst_var_name - plot_info_dict['obs_var_name'] = obs_var_name - plot_info_dict['interp_points'] = str(sbl_info[2]) - plot_info_dict['vert_profile'] = sbl_info[3] - init_hr = gda_util.get_init_hour( - int(date_info_dict['valid_hr_start']), - int(date_info_dict['forecast_hour']) - ) - plot_info_dict['fcst_var_level'] = sbl_info[3] - plot_info_dict['obs_var_level'] = sbl_info[3] - for t in range(len(fcst_var_thresh_list)): - plot_info_dict['fcst_var_thresh'] = fcst_var_thresh_list[t] - plot_info_dict['obs_var_thresh'] = obs_var_thresh_list[t] - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if init_hr in init_hrs: - if not os.path.exists(image_name): - if plot_info_dict['stat'] == 'FBAR_OBAR': - make_sbl = False - else: - make_sbl = True - else: - make_sbl = False - else: - make_sbl = False - del plot_info_dict['fcst_var_level'] - del plot_info_dict['obs_var_level'] - if make_sbl: - plot_sbl = gdap_sbl.StatByLevel(logger, job_output_dir, - job_output_dir, - model_info_dict, - date_info_dict, - plot_info_dict, - met_info_dict, - logo_dir) - plot_sbl.make_stat_by_level() - elif plot == 'lead_by_level': - import refs_atmos_plots_lead_by_level as gdap_lbl - if evs_run_mode == 'production' and int(fhr_inc) == 6: - fhrs_lbl = list( - 
range(int(fhr_start), int(fhr_end)+int(fhr_inc), 12) - ) - else: - fhrs_lbl = fhrs - vert_profiles = ['all', 'trop', 'strat', 'ltrop', 'utrop'] - for lbl_info in \ - list(itertools.product(valid_hrs, interp_points_list, - vert_profiles)): - date_info_dict['valid_hr_start'] = str(lbl_info[0]) - date_info_dict['valid_hr_end'] = str(lbl_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hours'] = fhrs_lbl - plot_info_dict['fcst_var_name'] = fcst_var_name - plot_info_dict['obs_var_name'] = obs_var_name - plot_info_dict['interp_points'] = str(lbl_info[1]) - plot_info_dict['vert_profile'] = lbl_info[2] - plot_info_dict['fcst_var_level'] = lbl_info[2] - plot_info_dict['obs_var_level'] = lbl_info[2] - for t in range(len(fcst_var_thresh_list)): - plot_info_dict['fcst_var_thresh'] = fcst_var_thresh_list[t] - plot_info_dict['obs_var_thresh'] = obs_var_thresh_list[t] - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if not os.path.exists(image_name): - if len(date_info_dict['forecast_hours']) <= 1: - logger.warning("No span of forecast hours to plot, " - +"given 1 forecast hour, skipping " - +"lead_by_level plots") - else: - if plot_info_dict['stat'] == 'FBAR_OBAR': - make_lbl = False - else: - make_lbl = True - else: - make_lbl = False - del plot_info_dict['fcst_var_level'] - del plot_info_dict['obs_var_level'] - if make_lbl: - plot_lbl = gdap_lbl.LeadByLevel(logger, job_output_dir, - job_output_dir, - model_info_dict, - date_info_dict, - plot_info_dict, - met_info_dict, logo_dir) - plot_lbl.make_lead_by_level() - elif plot == 'nohrsc_spatial_map': - import refs_atmos_plots_nohrsc_spatial_map as gdap_nsm - nohrsc_data_dir = os.path.join(VERIF_CASE_STEP_dir, 'data', 'nohrsc') - date_info_dict['valid_hr_start'] = str(valid_hrs[0]) - date_info_dict['valid_hr_end'] = str(valid_hrs[0]) - date_info_dict['valid_hr_inc'] = '24' - plot_info_dict['obs_var_name'] = obs_var_name - 
plot_info_dict['obs_var_level'] = obs_var_level_list[0] - plot_nsm = gdap_nsm.NOHRSCSpatialMap(logger, nohrsc_data_dir, - job_output_dir, date_info_dict, - plot_info_dict, logo_dir) - plot_nsm.make_nohrsc_spatial_map() - elif plot == 'precip_spatial_map': + if plot == 'precip_spatial_map': model_info_dict['obs'] = {'name': 'ccpa', 'plot_name': 'ccpa', 'obs_name': '24hrCCPA'} @@ -545,47 +211,6 @@ plot_info_dict, met_info_dict, logo_dir) plot_psm.make_precip_spatial_map() - elif plot == 'performance_diagram': - import refs_atmos_plots_performance_diagram as gdap_pd - for pd_info in \ - list(itertools.product(valid_hrs, fhrs, interp_points_list)): - date_info_dict['valid_hr_start'] = str(pd_info[0]) - date_info_dict['valid_hr_end'] = str(pd_info[0]) - date_info_dict['valid_hr_inc'] = '24' - date_info_dict['forecast_hour'] = str(pd_info[1]) - plot_info_dict['fcst_var_name'] = fcst_var_name - plot_info_dict['obs_var_name'] = obs_var_name - plot_info_dict['fcst_var_threshs'] = fcst_var_thresh_list - plot_info_dict['obs_var_name'] = obs_var_name - plot_info_dict['obs_var_threshs'] = obs_var_thresh_list - plot_info_dict['interp_points'] = str(pd_info[2]) - init_hr = gda_util.get_init_hour( - int(date_info_dict['valid_hr_start']), - int(date_info_dict['forecast_hour']) - ) - for l in range(len(fcst_var_level_list)): - plot_info_dict['fcst_var_level'] = fcst_var_level_list[l] - plot_info_dict['obs_var_level'] = obs_var_level_list[l] - image_name = plot_specs.get_savefig_name( - os.path.join(job_output_dir, 'images'), - plot_info_dict, date_info_dict - ) - if init_hr in init_hrs: - if not os.path.exists(image_name): - make_pd = True - else: - make_pd = False - else: - make_pd = False - if make_pd: - plot_pd = gdap_pd.PerformanceDiagram(logger, job_output_dir, - job_output_dir, - model_info_dict, - date_info_dict, - plot_info_dict, - met_info_dict, - logo_dir) - plot_pd.make_performance_diagram() else: logger.warning(plot+" not recongized") From 
dbf5de1325bde6d86eb13ddb4271894625999858 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 4 Dec 2024 15:43:28 +0000 Subject: [PATCH 11/19] Delete refs_atmos_util.py --- ush/cam/ush_refs_plot_py/refs_atmos_plots.py | 1 - .../refs_atmos_plots_precip_spatial_map.py | 1 - .../refs_atmos_plots_specs.py | 1 - ush/cam/ush_refs_plot_py/refs_atmos_util.py | 2527 ----------------- 4 files changed, 2530 deletions(-) delete mode 100644 ush/cam/ush_refs_plot_py/refs_atmos_util.py diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py index a163bcc96d..56bf811b28 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots.py @@ -14,7 +14,6 @@ import subprocess import itertools import shutil -import refs_atmos_util as gda_util from refs_atmos_plots_specs import PlotSpecs print("BEGIN: "+os.path.basename(__file__)) diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py index 184cfd7c4e..c41f61e354 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots_precip_spatial_map.py @@ -22,7 +22,6 @@ import cartopy.feature as cfeature from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter from cartopy import config -import refs_atmos_util as gda_util from refs_atmos_plots_specs import PlotSpecs class PrecipSpatialMap: diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py b/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py index 59233016d3..cae42c152e 100644 --- a/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py +++ b/ush/cam/ush_refs_plot_py/refs_atmos_plots_specs.py @@ -6,7 +6,6 @@ import sys import os import numpy as np -import refs_atmos_util as gda_util class PlotSpecs: def __init__(self, logger, plot_type): diff --git a/ush/cam/ush_refs_plot_py/refs_atmos_util.py 
b/ush/cam/ush_refs_plot_py/refs_atmos_util.py deleted file mode 100644 index 1af5c2a36a..0000000000 --- a/ush/cam/ush_refs_plot_py/refs_atmos_util.py +++ /dev/null @@ -1,2527 +0,0 @@ -#! /usr/bin/env python3 - -import os -import datetime -import numpy as np -import subprocess -import shutil -import sys -import netCDF4 as netcdf -import numpy as np -import glob -import pandas as pd -from time import sleep - -def run_shell_command(command): - """! Run shell command - - Args: - command - list of agrument entries (string) - - Returns: - - """ - print("Running "+' '.join(command)) - if any(mark in ' '.join(command) for mark in ['"', "'", '|', '*', '>']): - run_command = subprocess.run( - ' '.join(command), shell=True - ) - else: - run_command = subprocess.run(command) - if run_command.returncode != 0: - print("ERROR: "+' '.join(run_command.args)+" gave return code " - +str(run_command.returncode)) - -def metplus_command(conf_file_name): - """! Write out full call to METplus - - Args: - conf_file_name - METplus conf file name (string) - - Returns: - metplus_cmd - full call to METplus (string) - - """ - run_metplus = os.path.join(os.environ['METPLUS_PATH'], 'ush', - 'run_metplus.py') - machine_conf = os.path.join(os.environ['PARMevs'], 'metplus_config', - 'machine.conf') - conf_file = os.path.join(os.environ['PARMevs'], 'metplus_config', - os.environ['STEP'], os.environ['COMPONENT'], - os.environ['RUN']+'_'+os.environ['VERIF_CASE'], - conf_file_name) - if not os.path.exists(conf_file): - print("ERROR: "+conf_file+" DOES NOT EXIST") - sys.exit(1) - metplus_cmd = run_metplus+' -c '+machine_conf+' -c '+conf_file - return metplus_cmd - -def python_command(python_script_name, script_arg_list): - """! 
Write out full call to python - - Args: - python_script_name - python script name (string) - script_arg_list - list of script agruments (strings) - - Returns: - python_cmd - full call to python (string) - - """ - python_script = os.path.join(os.environ['USHevs'], os.environ['COMPONENT'], - python_script_name) - if not os.path.exists(python_script): - print("ERROR: "+python_script+" DOES NOT EXIST") - sys.exit(1) - python_cmd = 'python '+python_script - for script_arg in script_arg_list: - python_cmd = python_cmd+' '+script_arg - return python_cmd - -def check_file_exists_size(file_name): - """! Checks to see if file exists and has size greater than 0 - - Args: - file_name - file path (string) - - Returns: - file_good - boolean - - True: file exists,file size >0 - - False: file doesn't exist - OR file size = 0 - """ - if os.path.exists(file_name): - if os.path.getsize(file_name) > 0: - file_good = True - else: - print("WARNING: "+file_name+" empty, 0 sized") - file_good = False - else: - print("WARNING: "+file_name+" does not exist") - file_good = False - return file_good - -def copy_file(source_file, dest_file): - """! This copies a file from one location to another - - Args: - source_file - source file path (string) - dest_file - destination file path (string) - - Returns: - """ - if check_file_exists_size(source_file): - print("Copying "+source_file+" to "+dest_file) - shutil.copy2(source_file, dest_file) - -def convert_grib1_grib2(grib1_file, grib2_file): - """! Converts GRIB1 data to GRIB2 - - Args: - grib1_file - string of the path to - the GRIB1 file to - convert (string) - grib2_file - string of the path to - save the converted GRIB2 - file (string) - Returns: - """ - print("Converting GRIB1 file "+grib1_file+" " - +"to GRIB2 file "+grib2_file) - cnvgrib = os.environ['CNVGRIB'] - os.system(cnvgrib+' -g12 '+grib1_file+' ' - +grib2_file+' > /dev/null 2>&1') - -def convert_grib2_grib1(grib2_file, grib1_file): - """! 
Converts GRIB2 data to GRIB1 - - Args: - grib2_file - string of the path to - the GRIB2 file to - convert - grib1_file - string of the path to - save the converted GRIB1 - file - Returns: - """ - print("Converting GRIB2 file "+grib2_file+" " - +"to GRIB1 file "+grib1_file) - cnvgrib = os.environ['CNVGRIB'] - os.system(cnvgrib+' -g21 '+grib2_file+' ' - +grib1_file+' > /dev/null 2>&1') - -def convert_grib2_grib2(grib2_fileA, grib2_fileB): - """! Converts GRIB2 data to GRIB2 - - Args: - grib2_fileA - string of the path to - the GRIB2 file to - convert - grib2_fileB - string of the path to - save the converted GRIB2 - file - Returns: - """ - print("Converting GRIB2 file "+grib2_fileA+" " - +"to GRIB2 file "+grib2_fileB) - cnvgrib = os.environ['CNVGRIB'] - os.system(cnvgrib+' -g22 '+grib2_fileA+' ' - +grib2_fileB+' > /dev/null 2>&1') - -def get_time_info(date_start, date_end, date_type, init_hr_list, valid_hr_list, - fhr_list): - """! Creates a list of dictionaries containing information - on the valid dates and times, the initialization dates - and times, and forecast hour pairings - - Args: - date_start - verification start date - (string, format:YYYYmmdd) - date_end - verification end_date - (string, format:YYYYmmdd) - date_type - how to treat date_start and - date_end (string, values:VALID or INIT) - init_hr_list - list of initialization hours - (string) - valid_hr_list - list of valid hours (string) - fhr_list - list of forecasts hours (string) - - Returns: - time_info - list of dictionaries with the valid, - initalization, and forecast hour - pairings - """ - valid_hr_zfill2_list = [hr.zfill(2) for hr in valid_hr_list] - init_hr_zfill2_list = [hr.zfill(2) for hr in init_hr_list] - if date_type == 'VALID': - date_type_hr_list = valid_hr_zfill2_list - elif date_type == 'INIT': - date_type_hr_list = init_hr_zfill2_list - date_type_hr_start = date_type_hr_list[0] - date_type_hr_end = date_type_hr_list[-1] - if len(date_type_hr_list) > 1: - date_type_hr_inc = np.min( - 
np.diff(np.array(date_type_hr_list, dtype=int)) - ) - else: - date_type_hr_inc = 24 - date_start_dt = datetime.datetime.strptime(date_start+date_type_hr_start, - '%Y%m%d%H') - date_end_dt = datetime.datetime.strptime(date_end+date_type_hr_end, - '%Y%m%d%H') - time_info = [] - date_dt = date_start_dt - while date_dt <= date_end_dt: - if date_type == 'VALID': - valid_time_dt = date_dt - elif date_type == 'INIT': - init_time_dt = date_dt - for fhr in fhr_list: - if fhr == 'anl': - forecast_hour = 0 - else: - forecast_hour = int(fhr) - if date_type == 'VALID': - init_time_dt = (valid_time_dt - - datetime.timedelta(hours=forecast_hour)) - elif date_type == 'INIT': - valid_time_dt = (init_time_dt - + datetime.timedelta(hours=forecast_hour)) - if valid_time_dt.strftime('%H') in valid_hr_zfill2_list \ - and init_time_dt.strftime('%H') in init_hr_zfill2_list: - t = {} - t['valid_time'] = valid_time_dt - t['init_time'] = init_time_dt - t['forecast_hour'] = str(forecast_hour) - time_info.append(t) - date_dt = date_dt + datetime.timedelta(hours=int(date_type_hr_inc)) - return time_info - -def get_init_hour(valid_hour, forecast_hour): - """! Get a initialization hour/cycle - - Args: - valid_hour - valid hour (integer) - forecast_hour - forecast hour (integer) - """ - init_hour = 24 + (valid_hour - (forecast_hour%24)) - if forecast_hour % 24 == 0: - init_hour = valid_hour - else: - init_hour = 24 + (valid_hour - (forecast_hour%24)) - if init_hour >= 24: - init_hour = init_hour - 24 - return init_hour - -def format_filler(unfilled_file_format, valid_time_dt, init_time_dt, - forecast_hour, str_sub_dict): - """! 
Creates a filled file path from a format - - Args: - unfilled_file_format - file naming convention (string) - valid_time_dt - valid time (datetime) - init_time_dt - initialization time (datetime) - forecast_hour - forecast hour (string) - str_sub_dict - other strings to substitue (dictionary) - Returns: - filled_file_format - file_format filled in with verifying - time information (string) - """ - filled_file_format = '/' - format_opt_list = ['lead', 'lead_shift', 'valid', 'valid_shift', - 'init', 'init_shift'] - if len(list(str_sub_dict.keys())) != 0: - format_opt_list = format_opt_list+list(str_sub_dict.keys()) - for filled_file_format_chunk in unfilled_file_format.split('/'): - for format_opt in format_opt_list: - nformat_opt = ( - filled_file_format_chunk.count('{'+format_opt+'?fmt=') - ) - if nformat_opt > 0: - format_opt_count = 1 - while format_opt_count <= nformat_opt: - if format_opt in ['lead_shift', 'valid_shift', - 'init_shift']: - shift = (filled_file_format_chunk \ - .partition('shift=')[2] \ - .partition('}')[0]) - format_opt_count_fmt = ( - filled_file_format_chunk \ - .partition('{'+format_opt+'?fmt=')[2] \ - .rpartition('?')[0] - ) - else: - format_opt_count_fmt = ( - filled_file_format_chunk \ - .partition('{'+format_opt+'?fmt=')[2] \ - .partition('}')[0] - ) - if format_opt == 'valid': - replace_format_opt_count = valid_time_dt.strftime( - format_opt_count_fmt - ) - elif format_opt == 'lead': - if format_opt_count_fmt == '%1H': - if int(forecast_hour) < 10: - replace_format_opt_count = forecast_hour[1] - else: - replace_format_opt_count = forecast_hour - elif format_opt_count_fmt == '%2H': - replace_format_opt_count = forecast_hour.zfill(2) - elif format_opt_count_fmt == '%3H': - replace_format_opt_count = forecast_hour.zfill(3) - else: - replace_format_opt_count = forecast_hour - elif format_opt == 'init': - replace_format_opt_count = init_time_dt.strftime( - format_opt_count_fmt - ) - elif format_opt == 'lead_shift': - shift = 
(filled_file_format_chunk.partition('shift=')[2]\ - .partition('}')[0]) - forecast_hour_shift = str(int(forecast_hour) - + int(shift)) - if format_opt_count_fmt == '%1H': - if int(forecast_hour_shift) < 10: - replace_format_opt_count = ( - forecast_hour_shift[1] - ) - else: - replace_format_opt_count = forecast_hour_shift - elif format_opt_count_fmt == '%2H': - replace_format_opt_count = ( - forecast_hour_shift.zfill(2) - ) - elif format_opt_count_fmt == '%3H': - replace_format_opt_count = ( - forecast_hour_shift.zfill(3) - ) - else: - replace_format_opt_count = forecast_hour_shift - elif format_opt == 'init_shift': - shift = (filled_file_format_chunk.partition('shift=')[2]\ - .partition('}')[0]) - init_shift_time_dt = ( - init_time_dt + datetime.timedelta(hours=int(shift)) - ) - replace_format_opt_count = init_shift_time_dt.strftime( - format_opt_count_fmt - ) - elif format_opt == 'valid_shift': - shift = (filled_file_format_chunk.partition('shift=')[2]\ - .partition('}')[0]) - valid_shift_time_dt = ( - valid_time_dt + datetime.timedelta(hours=int(shift)) - ) - replace_format_opt_count = valid_shift_time_dt.strftime( - format_opt_count_fmt - ) - else: - replace_format_opt_count = str_sub_dict[format_opt] - if format_opt in ['lead_shift', 'valid_shift', 'init_shift']: - filled_file_format_chunk = ( - filled_file_format_chunk.replace( - '{'+format_opt+'?fmt=' - +format_opt_count_fmt - +'?shift='+shift+'}', - replace_format_opt_count - ) - ) - else: - filled_file_format_chunk = ( - filled_file_format_chunk.replace( - '{'+format_opt+'?fmt=' - +format_opt_count_fmt+'}', - replace_format_opt_count - ) - ) - format_opt_count+=1 - filled_file_format = os.path.join(filled_file_format, - filled_file_format_chunk) - return filled_file_format - -def prep_prod_gfs_file(source_file, dest_file, forecast_hour, prep_method): - """! 
Do prep work for GFS production files - - Args: - source_file_format - source file format (string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - WGRIB2 = os.environ['WGRIB2'] - EXECevs = os.environ['EXECevs'] - # Working file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - working_file1 = prepped_file+'.tmp1' - # Prep file - if prep_method == 'full': - if forecast_hour == 0: - wgrib_fhr = 'anl' - else: - wgrib_fhr = forecast_hour - thin_var_level_list = [ - 'CAPE:surface', - 'CAPE:90-0 mb above ground', - 'CWAT:entire atmosphere (considered as a single layer)', - 'DPT:2 m above ground', - 'GUST:surface', - 'HGT:1000 mb', 'HGT:925 mb', 'HGT:850 mb', 'HGT:700 mb', - 'HGT:500 mb', 'HGT:400 mb', 'HGT:300 mb', 'HGT:250 mb', - 'HGT:200 mb', 'HGT:150 mb', 'HGT:100 mb', 'HGT:50 mb','HGT:20 mb', - 'HGT:10 mb', 'HGT:5 mb', 'HGT:1 mb', 'HGT:cloud ceiling', - 'HGT:tropopause', - 'HPBL:surface', - 'ICEC:surface', - 'ICETK:surface', - 'LHTFL:surface', - 'O3MR:925 mb', 'O3MR:100 mb', 'O3MR:70 mb', 'O3MR:50 mb', - 'O3MR:30 mb', 'O3MR:20 mb', 'O3MR:10 mb', 'O3MR:5 mb', 'O3MR:1 mb', - 'PRES:surface', 'PRES:tropopause', - 'PRMSL:mean sea level', - 'PWAT:entire atmosphere (considered as a single layer)', - 'RH:1000 mb', 'RH:925 mb', 'RH:850 mb', 'RH:700 mb', 'RH:500 mb', - 'RH:400 mb', 'RH:300 mb', 'RH:250 mb', 'RH:200 mb', 'RH:150 mb', - 'RH:100 mb', 'RH:50 mb','RH:20 mb', 'RH:10 mb', 'RH:5 mb', - 'RH:1 mb', 'RH:2 m above ground', - 'SHTFL:surface', - 'SNOD:surface', - 'SPFH:1000 mb', 'SPFH:925 mb', 'SPFH:850 mb', 'SPFH:700 mb', - 'SPFH:500 mb', 'SPFH:400 mb', 'SPFH:300 mb', 'SPFH:250 mb', - 'SPFH:200 mb', 'SPFH:150 mb', 'SPFH:100 mb', 'SPFH:50 mb', - 'SPFH:20 mb', 'SPFH:10 mb', 'SPFH:5 mb', 'SPFH:1 mb', - 'SPFH:2 m above ground', - 'SOILW:0-0.1 m below ground', - 'TCDC:entire 
atmosphere:'+wgrib_fhr, - 'TMP:1000 mb', 'TMP:925 mb', 'TMP:850 mb', 'TMP:700 mb', - 'TMP:500 mb', 'TMP:400 mb', 'TMP:300 mb', 'TMP:250 mb', - 'TMP:200 mb', 'TMP:150 mb', 'TMP:100 mb', 'TMP:50 mb', - 'TMP:20 mb', 'TMP:10 mb', 'TMP:5 mb', 'TMP:1 mb', - 'TMP:2 m above ground', 'TMP:surface', 'TMP:tropopause', - 'TOZNE:entire atmosphere (considered as a single layer)', - 'TSOIL:0-0.1 m below ground', - 'UGRD:1000 mb', 'UGRD:925 mb', 'UGRD:850 mb', 'UGRD:700 mb', - 'UGRD:500 mb', 'UGRD:400 mb', 'UGRD:300 mb', 'UGRD:250 mb', - 'UGRD:200 mb', 'UGRD:150 mb', 'UGRD:100 mb', 'UGRD:50 mb', - 'UGRD:20 mb', 'UGRD:10 mb', 'UGRD:5 mb', 'UGRD:1 mb', - 'UGRD:10 m above ground', - 'VGRD:1000 mb', 'VGRD:925 mb', 'VGRD:850 mb', 'VGRD:700 mb', - 'VGRD:500 mb', 'VGRD:400 mb', 'VGRD:300 mb', 'VGRD:250 mb', - 'VGRD:200 mb', 'VGRD:150 mb', 'VGRD:100 mb', 'VGRD:50 mb', - 'VGRD:20 mb', 'VGRD:10 mb', 'VGRD:5 mb', 'VGRD:1 mb', - 'VGRD:10 m above ground', - 'VIS:surface', - 'WEASD:surface' - ] - # Missing in GFS files: Sea Ice Drift (Velocity) - SICED?? - # Sea Ice Extent - Derived from ICEC? - # Sea Ice Volume - if check_file_exists_size(source_file): - run_shell_command(['>', prepped_file]) - for thin_var_level in thin_var_level_list: - run_shell_command([WGRIB2, '-match', '"'+thin_var_level+'"', - source_file+'|'+WGRIB2, '-i', source_file, - '-grib', working_file1]) - run_shell_command(['cat', working_file1, '>>', prepped_file]) - os.remove(working_file1) - elif 'precip' in prep_method: - if int(forecast_hour) % 24 == 0: - thin_var_level = ('APCP:surface:0-' - +str(int(int(forecast_hour)/24))) - else: - thin_var_level = ('APCP:surface:0-'+forecast_hour) - if check_file_exists_size(source_file): - run_shell_command([WGRIB2, '-match', '"'+thin_var_level+'"', - source_file+'|'+WGRIB2, '-i', source_file, - '-grib', prepped_file]) - copy_file(prepped_file, dest_file) - -def prep_prod_fnmoc_file(source_file, dest_file, forecast_hour, - prep_method): - """! 
Do prep work for FNMOC production files - - Args: - source_file - source file format (string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - # Working file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - # Prep file - if check_file_exists_size(source_file): - convert_grib2_grib2(source_file, prepped_file) - copy_file(prepped_file, dest_file) - - -def prep_prod_jma_file(source_file_format, dest_file, forecast_hour, - prep_method): - """! Do prep work for JMA production files - - Args: - source_file_format - source file format (string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - WGRIB = os.environ['WGRIB'] - EXECevs = os.environ['EXECevs'] - JMAMERGE = os.path.join(EXECevs, 'jma_merge') - # Working file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - working_file1 = prepped_file+'.tmp1' - working_file2 = prepped_file+'.tmp2' - # Prep file - if prep_method == 'full': - if forecast_hour == 'anl': - wgrib_fhr = ':anl' - elif int(forecast_hour) == 0: - wgrib_fhr = ':anl' - else: - wgrib_fhr = ':'+forecast_hour+'hr' - for hem in ['n', 's']: - hem_source_file = source_file_format.replace('{hem?fmt=str}', hem) - if hem == 'n': - working_file = working_file1 - elif hem == 's': - working_file = working_file2 - if check_file_exists_size(hem_source_file): - run_shell_command( - [WGRIB+' '+hem_source_file+' | grep "'+wgrib_fhr+'" | ' - +WGRIB+' '+hem_source_file+' -i -grib -o ' - +working_file] - ) - if check_file_exists_size(working_file1) \ - and check_file_exists_size(working_file2): - run_shell_command( - [JMAMERGE, working_file1, working_file2, prepped_file] - ) - elif 'precip' in 
prep_method: - source_file = source_file_format - if check_file_exists_size(source_file): - run_shell_command( - [WGRIB+' '+source_file+' | grep "0-' - +forecast_hour+'hr" | '+WGRIB+' '+source_file - +' -i -grib -o '+prepped_file] - ) - copy_file(prepped_file, dest_file) - -def prep_prod_ecmwf_file(source_file, dest_file, forecast_hour, prep_method): - """! Do prep work for ECMWF production files - - Args: - source_file - source file format (string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - EXECevs = os.environ['EXECevs'] - ECMGFSLOOKALIKENEW = os.path.join(EXECevs, 'ecm_gfs_look_alike_new') - PCPCONFORM = os.path.join(EXECevs, 'pcpconform') - WGRIB = os.environ['WGRIB'] - # Working file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - working_file1 = prepped_file+'.tmp1' - # Prep file - if prep_method == 'full': - if forecast_hour == 'anl': - wgrib_fhr = ':anl' - elif int(forecast_hour) == 0: - wgrib_fhr = ':anl' - else: - wgrib_fhr = ':'+forecast_hour+'hr' - if check_file_exists_size(source_file): - run_shell_command( - [WGRIB+' '+source_file+' | grep "'+wgrib_fhr+'" | ' - +WGRIB+' '+source_file+' -i -grib -o ' - +working_file1] - ) - if check_file_exists_size(working_file1): - run_shell_command(['chmod', '750', working_file1]) - run_shell_command(['chgrp', 'rstprod', working_file1]) - run_shell_command( - [ECMGFSLOOKALIKENEW, working_file1, prepped_file] - ) - elif 'precip' in prep_method: - if check_file_exists_size(source_file): - run_shell_command( - [PCPCONFORM, 'ecmwf', source_file, prepped_file] - ) - if os.path.exists(prepped_file): - run_shell_command(['chmod', '750', prepped_file]) - run_shell_command(['chgrp', 'rstprod', prepped_file]) - copy_file(prepped_file, dest_file) - -def prep_prod_ukmet_file(source_file_format, dest_file, forecast_hour, - 
prep_method): - """! Do prep work for UKMET production files - - Args: - source_file_format - source file format (string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - EXECevs = os.environ['EXECevs'] - WGRIB = os.environ['WGRIB'] - WGRIB2 = os.environ['WGRIB2'] - UKMHIRESMERGE = os.path.join(EXECevs, 'ukm_hires_merge') - # Working file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - working_file1 = prepped_file+'.tmp1' - working_file2 = prepped_file+'.tmp2' - # Prep file - if prep_method == 'full': - ukmet_fhr_id_dict = { - 'anl': 'AAT', - '0': 'AAT', - '6': 'BBT', - '12': 'CCT', - '18': 'DDT', - '24': 'EET', - '30': 'FFT', - '36': 'GGT', - '42': 'HHT', - '48': 'IIT', - '54': 'JJT', - '60': 'JJT', - '66': 'KKT', - '72': 'KKT', - '78': 'QQT', - '84': 'LLT', - '90': 'TTT', - '96': 'MMT', - '102': 'UUT', - '108': 'NNT', - '114': 'VVT', - '120': 'OOT', - '126': '11T', - '132': 'PPA', - '138': '22T', - '144': 'PPA' - } - if forecast_hour in list(ukmet_fhr_id_dict.keys()): - if forecast_hour == 'anl': - fhr_id = ukmet_fhr_id_dict['anl'] - fhr_str = '0' - wgrib_fhr = 'anl' - else: - fhr_id = ukmet_fhr_id_dict[forecast_hour] - fhr_str = forecast_hour - if forecast_hour == '0': - wgrib_fhr = 'anl' - else: - wgrib_fhr = forecast_hour+'hr' - source_file = source_file_format.replace('{letter?fmt=str}', - fhr_id) - if check_file_exists_size(source_file): - run_shell_command( - [WGRIB+' '+source_file+' | grep "'+wgrib_fhr - +'" | '+WGRIB+' '+source_file+' -i -grib -o ' - +working_file1] - ) - if check_file_exists_size(working_file1): - run_shell_command([UKMHIRESMERGE, working_file1, - prepped_file, fhr_str]) - elif 'precip' in prep_method: - source_file = source_file_format - source_file_accum = 12 - if check_file_exists_size(source_file): - run_shell_command( - [WGRIB2+' 
'+source_file+' -if ":TWATP:" -set_var "APCP" ' - +'-fi -grib '+working_file1] - ) - if check_file_exists_size(working_file1): - convert_grib2_grib1(working_file1, working_file2) - if check_file_exists_size(working_file2): - source_file_accum_fhr_start = ( - int(forecast_hour) - source_file_accum - ) - run_shell_command( - [WGRIB+' '+working_file2+' | grep "' - +str(source_file_accum_fhr_start)+'-' - +forecast_hour+'hr" | '+WGRIB+' '+working_file2 - +' -i -grib -o '+prepped_file] - ) - copy_file(prepped_file, dest_file) - -def prep_prod_dwd_file(source_file, dest_file, forecast_hour, prep_method): - """! Do prep work for DWD production files - - Args: - source_file_format - source file format (string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - EXECevs = os.environ['EXECevs'] - PCPCONFORM = os.path.join(EXECevs, 'pcpconform') - # Working file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - #### For DWD to run through pcpconform, file name must be - #### dwd_YYYYMMDDHH_(hhh)_(hhh).tmp - working_file1 = os.path.join(os.getcwd(), - source_file.rpartition('/')[2]+'.tmp') - # Prep file - if 'precip' in prep_method: - if check_file_exists_size(source_file): - convert_grib2_grib1(source_file, working_file1) - if check_file_exists_size(working_file1): - run_shell_command( - [PCPCONFORM, 'dwd', working_file1, - prepped_file] - ) - copy_file(prepped_file, dest_file) - -def prep_prod_metfra_file(source_file, dest_file, forecast_hour, prep_method): - """! 
Do prep work for METRFRA production files - - Args: - source_file - source file(string) - dest_file - destination file (string) - forecast_hour - forecast hour (string) - prep_method - name of prep method to do - (string) - - Returns: - """ - # Environment variables and executables - EXECevs = os.environ['EXECevs'] - WGRIB = os.environ['WGRIB'] - # Temporary file names - prepped_file = os.path.join(os.getcwd(), - 'atmos.'+dest_file.rpartition('/')[2]) - # Prep file - if 'precip' in prep_method: - file_accum = 24 - fhr_accum_start = int(forecast_hour)-file_accum - if check_file_exists_size(source_file): - run_shell_command( - [WGRIB+' '+source_file+' | grep "' - +str(fhr_accum_start)+'-' - +forecast_hour+'hr" | '+WGRIB+' '+source_file - +' -i -grib -o '+prepped_file] - ) - copy_file(prepped_file, dest_file) - -def prep_prod_osi_saf_file(daily_source_file_format, daily_dest_file, - weekly_source_file_list, weekly_dest_file, - weekly_dates): - """! Do prep work for OSI-SAF production files - - Args: - daily_source_file_format - daily source file format (string) - daily_dest_file - daily destination file (string) - weekly_source_file_list - list of daily files to make up - weekly average file - weekly_dest_file - weekly destination file (string) - weekly_dates - date span for weekly dates (tuple - of datetimes) - Returns: - """ - # Environment variables and executables - FIXevs = os.environ['FIXevs'] - CDO_ROOT = os.environ['CDO_ROOT'] - # Temporary file names - daily_prepped_file = os.path.join(os.getcwd(), 'atmos.' - +daily_dest_file.rpartition('/')[2]) - weekly_prepped_file = os.path.join(os.getcwd(), 'atmos.' - +weekly_dest_file.rpartition('/')[2]) - # Prep daily file - for hem in ['nh', 'sh']: - hem_source_file = daily_source_file_format.replace('{hem?fmt=str}', - hem) - hem_dest_file = daily_dest_file.replace('multi.', 'multi.'+hem+'.') - hem_prepped_file = os.path.join(os.getcwd(), 'atmos.' 
- +hem_dest_file.rpartition('/')[2]) - if check_file_exists_size(hem_source_file): - run_shell_command( - [os.path.join(CDO_ROOT, 'bin', 'cdo'), - 'remapbil,' - +os.path.join(FIXevs, 'cdo_grids', 'G004.grid'), - hem_source_file, hem_prepped_file] - ) - if hem == 'nh': - nh_prepped_file = hem_prepped_file - elif hem == 'sh': - sh_prepped_file = hem_prepped_file - if check_file_exists_size(nh_prepped_file) \ - and check_file_exists_size(sh_prepped_file): - nh_data = netcdf.Dataset(nh_prepped_file) - sh_data = netcdf.Dataset(sh_prepped_file) - merged_data = netcdf.Dataset(daily_prepped_file, 'w', - format='NETCDF3_CLASSIC') - for attr in nh_data.ncattrs(): - if attr == 'history': - merged_data.setncattr( - attr, nh_data.getncattr(attr)+' ' - +sh_data.getncattr(attr) - ) - elif attr == 'southernmost_latitude': - merged_data.setncattr(attr, '-90') - elif attr == 'area': - merged_data.setncattr(attr, 'Global') - else: - merged_data.setncattr(attr, nh_data.getncattr(attr)) - for dim in list(nh_data.dimensions.keys()): - merged_data.createDimension(dim, len(nh_data.dimensions[dim])) - for var in ['time', 'time_bnds', 'lat', 'lon']: - merged_var = merged_data.createVariable( - var, nh_data.variables[var].datatype, - nh_data.variables[var].dimensions - ) - for k in nh_data.variables[var].ncattrs(): - merged_var.setncatts( - {k: nh_data.variables[var].getncattr(k)} - ) - if var == 'time': - merged_var[:] = nh_data.variables[var][:] + 43200 - else: - merged_var[:] = nh_data.variables[var][:] - for var in ['ice_conc', 'ice_conc_unfiltered', 'masks', - 'confidence_level', 'status_flag', 'total_uncertainty', - 'smearing_uncertainty', 'algorithm_uncertainty']: - merged_var = merged_data.createVariable( - var, nh_data.variables[var].datatype, - ('lat', 'lon') - ) - for k in nh_data.variables[var].ncattrs(): - if k == 'long_name': - merged_var.setncatts( - {k: nh_data.variables[var].getncattr(k)\ - .replace('northern hemisphere', 'globe')} - ) - else: - merged_var.setncatts( - {k: 
nh_data.variables[var].getncattr(k)} - ) - merged_var_vals = np.ma.masked_equal( - np.vstack((sh_data.variables[var][0,:180,:], - nh_data.variables[var][0,180:,:])) - ,nh_data.variables[var]._FillValue) - merged_var[:] = merged_var_vals - merged_data.close() - copy_file(daily_prepped_file, daily_dest_file) - -def prep_prod_ghrsst_ospo_file(source_file, dest_file, date_dt): - """! Do prep work for GHRSST OSPO production files - - Args: - source_file - source file (string) - dest_file - destination file (string) - date_dt - date (datetime object) - Returns: - """ - # Environment variables and executables - # Temporary file names - prepped_file = os.path.join(os.getcwd(), 'atmos.' - +source_file.rpartition('/')[2]) - # Prep file - copy_file(source_file, prepped_file) - if check_file_exists_size(prepped_file): - prepped_data = netcdf.Dataset(prepped_file, 'a', - format='NETCDF3_CLASSIC') - ghrsst_ospo_date_since_dt = datetime.datetime.strptime( - '1981-01-01 00:00:00','%Y-%m-%d %H:%M:%S' - ) - prepped_data['time'][:] = prepped_data['time'][:][0] + 43200 - prepped_data.close() - copy_file(prepped_file, dest_file) -def get_model_file(valid_time_dt, init_time_dt, forecast_hour, - source_file_format, dest_file_format): - """! 
This get a model file and saves it in the specificed - destination - - Args: - valid_time_dt - valid time (datetime) - init_time_dt - initialization time (datetime) - forecast_hour - forecast hour (string) - source_file_format - source file format (string) - dest_file_format - destination file format (string) - - - Returns: - """ - dest_file = format_filler(dest_file_format, valid_time_dt, - init_time_dt, forecast_hour, {}) - if not os.path.exists(dest_file): - source_file = format_filler(source_file_format, valid_time_dt, - init_time_dt, forecast_hour, {}) - if 'dcom/navgem' in source_file: - prep_prod_fnmoc_file(source_file, dest_file, forecast_hour, 'full') - elif 'wgrbbul/jma_' in source_file: - prep_prod_jma_file(source_file, dest_file, forecast_hour, 'full') - elif 'wgrbbul/ecmwf' in source_file: - prep_prod_ecmwf_file(source_file, dest_file, forecast_hour, 'full') - elif 'wgrbbul/ukmet_hires' in source_file: - prep_prod_ukmet_file(source_file, dest_file, forecast_hour, 'full') - elif 'qpf_verif/jma' in source_file: - prep_prod_jma_file(source_file, dest_file, forecast_hour, - 'precip') - elif 'qpf_verif/UWD' in source_file: - prep_prod_ecmwf_file(source_file, dest_file, forecast_hour, - 'precip') - elif 'qpf_verif/ukmo' in source_file: - prep_prod_ukmet_file(source_file, dest_file, forecast_hour, - 'precip') - elif 'qpf_verif/dwd' in source_file: - prep_prod_dwd_file(source_file, dest_file, forecast_hour, - 'precip') - elif 'qpf_verif/METFRA' in source_file: - prep_prod_metfra_file(source_file, dest_file, forecast_hour, - 'precip') - else: - if os.path.exists(source_file): - print("Linking "+source_file+" to "+dest_file) - os.symlink(source_file, dest_file) - else: - print("WARNING: "+source_file+" DOES NOT EXIST") - -def get_truth_file(valid_time_dt, source_file_format, dest_file_format): - """! 
This get a model file and saves it in the specificed - destination - - Args: - valid_time_dt - valid time (datetime) - source_file_format - source file format (string) - dest_file_format - destination file format (string) - - - Returns: - """ - dest_file = format_filler(dest_file_format, valid_time_dt, - valid_time_dt, ['anl'], {}) - if not os.path.exists(dest_file): - source_file = format_filler(source_file_format, valid_time_dt, - valid_time_dt, ['anl'], {}) - if os.path.exists(source_file): - print("Linking "+source_file+" to "+dest_file) - os.symlink(source_file, dest_file) - else: - print("WARNING: "+source_file+" DOES NOT EXIST") - -def check_model_files(job_dict): - """! Check what model files or don't exist - - Args: - job_dict - dictionary containing settings - job is running with (strings) - - Returns: - model_files_exist - if non-zero number of model files - exist or not (boolean) - fhr_list - list of forecast hours that model - files exist for (string) - """ - valid_date_dt = datetime.datetime.strptime( - job_dict['DATE']+job_dict['valid_hr_start'], - '%Y%m%d%H' - ) - verif_case_dir = os.path.join( - job_dict['DATA'], job_dict['VERIF_CASE']+'_'+job_dict['STEP'] - ) - model = job_dict['MODEL'] - fhr_min = int(job_dict['fhr_start']) - fhr_max = int(job_dict['fhr_end']) - fhr_inc = int(job_dict['fhr_inc']) - fhr = fhr_min - fhr_list = [] - fhr_check_dict = {} - while fhr <= fhr_max: - fhr_check_dict[str(fhr)] = {} - init_date_dt = valid_date_dt - datetime.timedelta(hours=fhr) - if job_dict['JOB_GROUP'] == 'reformat_data': - model_file_format = os.path.join(verif_case_dir, 'data', model, - model+'.{init?fmt=%Y%m%d%H}.' 
- +'f{lead?fmt=%3H}') - if job_dict['VERIF_CASE'] == 'grid2grid': - if job_dict['VERIF_TYPE'] == 'pres_levs' \ - and job_dict['job_name'] == 'GeoHeightAnom': - if init_date_dt.strftime('%H') in ['00', '12'] \ - and fhr % 24 == 0: - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - fhr_check_dict[str(fhr)]['file2'] = { - 'valid_date': valid_date_dt, - 'init_date': (valid_date_dt - -datetime.timedelta(hours=fhr-12)), - 'forecast_hour': str(fhr-12) - } - elif job_dict['VERIF_TYPE'] in ['sea_ice', 'sst']: - fhr_avg_end = fhr - fhr_avg_start = fhr-24 - fhr_in_avg = fhr_avg_start - nf = 0 - while fhr_in_avg <= fhr_avg_end: - fhr_check_dict[str(fhr)]['file'+str(nf+1)] = { - 'valid_date': valid_date_dt, - 'init_date': valid_date_dt-datetime.timedelta( - hours=fhr_in_avg - ), - 'forecast_hour': str(fhr_in_avg) - } - nf+=1 - fhr_in_avg+=int(job_dict['fhr_inc']) - else: - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - if job_dict['VERIF_CASE'] == 'grid2obs': - if job_dict['VERIF_TYPE'] == 'ptype': - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - elif job_dict['JOB_GROUP'] == 'assemble_data': - if job_dict['VERIF_CASE'] == 'grid2grid': - if job_dict['VERIF_TYPE'] in ['precip_accum24hr', - 'precip_accum3hr']: - model_file_format = os.path.join(verif_case_dir, 'data', - model, model+'.precip.' - +'{init?fmt=%Y%m%d%H}.' - +'f{lead?fmt=%3H}') - elif job_dict['VERIF_TYPE'] == 'pres_levs' \ - and job_dict['job_name'] == 'DailyAvg_GeoHeightAnom': - model_file_format = os.path.join(verif_case_dir, - 'METplus_output', - job_dict['RUN']+'.' 
- +'{valid?fmt=%Y%m%d}', - job_dict['MODEL'], - job_dict['VERIF_CASE'], - 'anomaly_' - +job_dict['VERIF_TYPE']+'_' - +job_dict['job_name']\ - .replace('DailyAvg_', '') - +'_init' - +'{init?fmt=%Y%m%d%H}_' - +'fhr{lead?fmt=%3H}.nc') - elif job_dict['VERIF_TYPE'] in ['sea_ice', 'sst']: - model_file_format = os.path.join(verif_case_dir, - 'METplus_output', - job_dict['RUN']+'.' - +'{valid?fmt=%Y%m%d}', - job_dict['MODEL'], - job_dict['VERIF_CASE'], - 'grid_stat_' - +job_dict['VERIF_TYPE']+'_' - +job_dict['job_name']\ - .replace('DailyAvg_', '') - +'_{lead?fmt=%2H}0000L_' - +'{valid?fmt=%Y%m%d}_' - +'{valid?fmt=%H}0000V_' - +'pairs.nc') - else: - model_file_format = os.path.join(verif_case_dir, 'data', - model, model - +'.{init?fmt=%Y%m%d%H}.' - +'f{lead?fmt=%3H}') - if job_dict['VERIF_TYPE'] in ['precip_accum24hr', - 'precip_accum3hr']: - precip_accum = int( - job_dict['VERIF_TYPE'].replace('precip_accum','')\ - .replace('hr','') - ) - fhr_in_accum_list = [str(fhr)] - if job_dict['MODEL_accum'][0] == '{': #continuous - if fhr-precip_accum > 0: - fhr_in_accum_list.append(str(fhr-precip_accum)) - elif int(job_dict['MODEL_accum']) < precip_accum: - nfiles_in_accum = int( - precip_accum/int(job_dict['MODEL_accum']) - ) - nf = 1 - while nf <= nfiles_in_accum: - fhr_nf = fhr - ((nf-1)*int(job_dict['MODEL_accum'])) - if fhr_nf > 0: - fhr_in_accum_list.append(str(fhr_nf)) - nf+=1 - for fhr_in_accum in fhr_in_accum_list: - file_num = fhr_in_accum_list.index(fhr_in_accum)+1 - fhr_check_dict[str(fhr)]['file'+str(file_num)] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr_in_accum) - } - elif job_dict['VERIF_TYPE'] == 'snow': - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - fhr_check_dict[str(fhr)]['file2'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr-24) - } - else: - fhr_check_dict[str(fhr)]['file1'] = { - 
'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - elif job_dict['VERIF_CASE'] == 'grid2obs': - model_file_format = os.path.join(verif_case_dir, 'data', - model, model - +'.{init?fmt=%Y%m%d%H}.' - +'f{lead?fmt=%3H}') - if job_dict['VERIF_TYPE'] == 'sfc' \ - and job_dict['job_name'] == 'TempAnom2m': - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - fhr_check_dict[str(fhr)]['file2'] = { - 'valid_date': valid_date_dt, - 'init_date': (valid_date_dt - -datetime.timedelta(hours=fhr-12)), - 'forecast_hour': str(fhr-12) - } - elif job_dict['VERIF_TYPE'] == 'ptype': - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - elif job_dict['JOB_GROUP'] == 'generate_stats': - if job_dict['VERIF_CASE'] == 'grid2grid': - if job_dict['VERIF_TYPE'] == 'pres_levs' \ - and job_dict['job_name'] == 'DailyAvg_GeoHeightAnom': - model_file_format = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.{valid?fmt=%Y%m%d}', - model, job_dict['VERIF_CASE'], 'daily_avg_' - +job_dict['VERIF_TYPE']+'_'+job_dict['job_name'] - +'_init{init?fmt=%Y%m%d%H}_' - +'valid{valid_shift?fmt=%Y%m%d%H?shift=-12}' - +'to{valid?fmt=%Y%m%d%H}.nc' - ) - elif job_dict['VERIF_TYPE'] == 'pres_levs' \ - and job_dict['job_name'] == 'WindShear': - model_file_format = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.{valid?fmt=%Y%m%d}', - model, job_dict['VERIF_CASE'], 'wind_shear_' - +job_dict['VERIF_TYPE']+'_'+job_dict['job_name'] - +'_init{init?fmt=%Y%m%d%H}_fhr{lead?fmt=%3H}.nc' - ) - elif job_dict['VERIF_TYPE'] in ['precip_accum24hr', - 'precip_accum3hr']: - precip_accum = (job_dict['VERIF_TYPE']\ - .replace('precip_accum','')) - model_file_format = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.{valid?fmt=%Y%m%d}', - model, job_dict['VERIF_CASE'], 
'pcp_combine_' - +job_dict['VERIF_TYPE']+'_'+precip_accum+'Accum_init' - +'{init?fmt=%Y%m%d%H}_fhr{lead?fmt=%3H}.nc' - ) - elif job_dict['VERIF_TYPE'] == 'sea_ice': - model_file_format = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.{valid?fmt=%Y%m%d}', - model, job_dict['VERIF_CASE'], 'daily_avg_' - +job_dict['VERIF_TYPE']+'_'+job_dict['job_name'] - +'_init{init?fmt=%Y%m%d%H}_' - +'valid{valid_shift?fmt=%Y%m%d%H?shift=-24}' - +'to{valid?fmt=%Y%m%d%H}.nc' - ) - elif job_dict['VERIF_TYPE'] == 'snow': - model_file_format = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.{valid?fmt=%Y%m%d}', - model, job_dict['VERIF_CASE'], 'pcp_combine_' - +job_dict['VERIF_TYPE']+'_24hrAccum_' - +job_dict['file_name_var']+'_init' - +'{init?fmt=%Y%m%d%H}_fhr{lead?fmt=%3H}.nc' - ) - elif job_dict['VERIF_TYPE'] == 'sst': - model_file_format = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.{valid?fmt=%Y%m%d}', - model, job_dict['VERIF_CASE'], 'daily_avg_' - +job_dict['VERIF_TYPE']+'_'+job_dict['job_name'] - +'_init{init?fmt=%Y%m%d%H}_' - +'valid{valid_shift?fmt=%Y%m%d%H?shift=-24}' - +'to{valid?fmt=%Y%m%d%H}.nc' - ) - else: - model_file_format = os.path.join( - verif_case_dir, 'data', model, - model+'.{init?fmt=%Y%m%d%H}.f{lead?fmt=%3H}' - ) - elif job_dict['VERIF_CASE'] == 'grid2obs': - if job_dict['VERIF_TYPE'] == 'ptype' \ - and job_dict['job_name'] == 'Ptype': - model_file_format = os.path.join(verif_case_dir, - 'METplus_output', - job_dict['RUN']+'.' - +'{valid?fmt=%Y%m%d}', - job_dict['MODEL'], - job_dict['VERIF_CASE'], - 'merged_ptype_' - +job_dict['VERIF_TYPE']+'_' - +job_dict['job_name']+'_' - +'init{init?fmt=%Y%m%d%H}_' - +'fhr{lead?fmt=%3H}.nc') - elif job_dict['VERIF_TYPE'] == 'sfc' \ - and job_dict['job_name'] == 'DailyAvg_TempAnom2m': - model_file_format = os.path.join(verif_case_dir, - 'METplus_output', - job_dict['RUN']+'.' 
- +'{valid?fmt=%Y%m%d}', - job_dict['MODEL'], - job_dict['VERIF_CASE'], - 'anomaly_' - +job_dict['VERIF_TYPE']+'_' - +job_dict['job_name']\ - .replace('DailyAvg_', '') - +'_init' - +'{init?fmt=%Y%m%d%H}_' - +'fhr{lead?fmt=%3H}.stat') - else: - model_file_format = os.path.join( - verif_case_dir, 'data', model, - model+'.{init?fmt=%Y%m%d%H}.f{lead?fmt=%3H}' - ) - fhr_check_dict[str(fhr)]['file1'] = { - 'valid_date': valid_date_dt, - 'init_date': init_date_dt, - 'forecast_hour': str(fhr) - } - fhr+=fhr_inc - for fhr_key in list(fhr_check_dict.keys()): - fhr_key_files_exist_list = [] - for fhr_fileN_key in list(fhr_check_dict[fhr_key].keys()): - fhr_fileN = format_filler( - model_file_format, - fhr_check_dict[fhr_key][fhr_fileN_key]['valid_date'], - fhr_check_dict[fhr_key][fhr_fileN_key]['init_date'], - fhr_check_dict[fhr_key][fhr_fileN_key]['forecast_hour'], - {} - ) - if os.path.exists(fhr_fileN): - fhr_key_files_exist_list.append(True) - if job_dict['JOB_GROUP'] == 'reformat_data' \ - and job_dict['job_name'] in ['GeoHeightAnom', - 'Concentration', - 'SST']: - fhr_list.append( - fhr_check_dict[fhr_key][fhr_fileN_key]\ - ['forecast_hour'] - ) - elif job_dict['JOB_GROUP'] == 'assemble_data' \ - and job_dict['job_name'] in ['TempAnom2m']: - fhr_list.append( - fhr_check_dict[fhr_key][fhr_fileN_key]\ - ['forecast_hour'] - ) - else: - fhr_key_files_exist_list.append(False) - if all(x == True for x in fhr_key_files_exist_list) \ - and len(fhr_key_files_exist_list) > 0: - fhr_list.append(fhr_key) - fhr_list = list( - np.asarray(np.unique(np.asarray(fhr_list, dtype=int)),dtype=str) - ) - # UKMET data doesn't have RH for fhr 132 or 144 - if job_dict['MODEL'] == 'ukmet' \ - and job_dict['VERIF_CASE'] == 'grid2obs' \ - and job_dict['VERIF_TYPE'] == 'pres_levs' \ - and job_dict['job_name'] == 'RelHum': - for fhr_rm in ['132', '144']: - if fhr_rm in fhr_list: - fhr_list.remove(fhr_rm) - if len(fhr_list) != 0: - model_files_exist = True - else: - model_files_exist = False - return 
model_files_exist, fhr_list - -def check_truth_files(job_dict): - """! - Args: - job_dict - dictionary containing settings - job is running with (strings) - - Returns: - all_truth_file_exist - if all needed truth files - exist or not (boolean) - """ - valid_date_dt = datetime.datetime.strptime( - job_dict['DATE']+job_dict['valid_hr_start'], - '%Y%m%d%H' - ) - verif_case_dir = os.path.join( - job_dict['DATA'], job_dict['VERIF_CASE']+'_'+job_dict['STEP'] - ) - truth_file_list = [] - if job_dict['JOB_GROUP'] == 'reformat_data': - if job_dict['VERIF_CASE'] == 'grid2grid': - if job_dict['VERIF_TYPE'] == 'pres_levs': - model_truth_file = os.path.join( - verif_case_dir, 'data', job_dict['MODEL'], - job_dict['MODEL']+'.'+valid_date_dt.strftime('%Y%m%d%H') - +'.truth' - ) - truth_file_list.append(model_truth_file) - elif job_dict['VERIF_CASE'] == 'grid2obs': - if job_dict['VERIF_TYPE'] in ['pres_levs', 'sfc', 'ptype'] \ - and 'Prepbufr' in job_dict['job_name']: - prepbufr_name = (job_dict['job_name'].replace('Prepbufr', '')\ - .lower()) - prepbufr_file = os.path.join( - verif_case_dir, 'data', 'prepbufr_'+prepbufr_name, - 'prepbufr.'+prepbufr_name+'.' - +valid_date_dt.strftime('%Y%m%d%H') - ) - truth_file_list.append(prepbufr_file) - elif job_dict['JOB_GROUP'] == 'assemble_data': - if job_dict['VERIF_CASE'] == 'grid2grid': - if job_dict['VERIF_TYPE'] == 'precip_accum24hr' \ - and job_dict['job_name'] == '24hrCCPA': - nccpa_files = 4 - n = 1 - while n <= 4: - nccpa_file = os.path.join( - verif_case_dir, 'data', 'ccpa', 'ccpa.6H.' 
- +(valid_date_dt-datetime.timedelta(hours=(n-1)*6))\ - .strftime('%Y%m%d%H') - ) - truth_file_list.append(nccpa_file) - n+=1 - elif job_dict['VERIF_CASE'] == 'grid2obs': - if job_dict['VERIF_TYPE'] in ['pres_levs', 'sfc', 'ptype']: - pb2nc_file = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.'+valid_date_dt.strftime('%Y%m%d'), - 'prepbufr', job_dict['VERIF_CASE'], 'pb2nc_' - +job_dict['VERIF_TYPE']+'_'+job_dict['prepbufr']+'_valid' - +valid_date_dt.strftime('%Y%m%d%H')+'.nc' - ) - truth_file_list.append(pb2nc_file) - elif job_dict['JOB_GROUP'] == 'generate_stats': - if job_dict['VERIF_CASE'] == 'grid2grid': - if job_dict['VERIF_TYPE'] == 'pres_levs': - model_truth_file = os.path.join( - verif_case_dir, 'data', job_dict['MODEL'], - job_dict['MODEL']+'.'+valid_date_dt.strftime('%Y%m%d%H') - +'.truth' - ) - truth_file_list.append(model_truth_file) - elif job_dict['VERIF_TYPE'] == 'precip_accum24hr': - ccpa_file = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.'+valid_date_dt.strftime('%Y%m%d'), - 'ccpa', job_dict['VERIF_CASE'], 'pcp_combine_' - +job_dict['VERIF_TYPE']+'_24hrCCPA_valid' - +valid_date_dt.strftime('%Y%m%d%H')+'.nc' - ) - truth_file_list.append(ccpa_file) - elif job_dict['VERIF_TYPE'] == 'precip_accum3hr': - ccpa_file = os.path.join( - verif_case_dir, 'data', 'ccpa', 'ccpa.3H.' - +valid_date_dt.strftime('%Y%m%d%H') - ) - truth_file_list.append(ccpa_file) - elif job_dict['VERIF_TYPE'] == 'sea_ice': - osi_saf_file = os.path.join( - verif_case_dir, 'data', 'osi_saf', - 'osi_saf.multi.' 
- +(valid_date_dt-datetime.timedelta(hours=24))\ - .strftime('%Y%m%d%H') - +'to'+valid_date_dt.strftime('%Y%m%d%H')+'_G004.nc' - ) - truth_file_list.append(osi_saf_file) - elif job_dict['VERIF_TYPE'] == 'snow': - nohrsc_file = os.path.join( - verif_case_dir, 'data', 'nohrsc', - 'nohrsc.24H.'+valid_date_dt.strftime('%Y%m%d%H') - ) - truth_file_list.append(nohrsc_file) - elif job_dict['VERIF_TYPE'] == 'sst': - ghrsst_ospo_file = os.path.join( - verif_case_dir, 'data', 'ghrsst_ospo', - 'ghrsst_ospo.' - +(valid_date_dt-datetime.timedelta(hours=24))\ - .strftime('%Y%m%d%H') - +'to'+valid_date_dt.strftime('%Y%m%d%H')+'.nc' - ) - truth_file_list.append(ghrsst_ospo_file) - elif job_dict['VERIF_CASE'] == 'grid2obs': - if job_dict['VERIF_TYPE'] in ['pres_levs', 'sfc', 'ptype']: - pb2nc_file = os.path.join( - verif_case_dir, 'METplus_output', - job_dict['RUN']+'.'+valid_date_dt.strftime('%Y%m%d'), - 'prepbufr', job_dict['VERIF_CASE'], 'pb2nc_' - +job_dict['VERIF_TYPE']+'_'+job_dict['prepbufr']+'_valid' - +valid_date_dt.strftime('%Y%m%d%H')+'.nc' - ) - truth_file_list.append(pb2nc_file) - truth_files_exist_list = [] - for truth_file in truth_file_list: - if os.path.exists(truth_file): - truth_files_exist_list.append(True) - else: - truth_files_exist_list.append(False) - if all(x == True for x in truth_files_exist_list) \ - and len(truth_files_exist_list) > 0: - all_truth_file_exist = True - else: - all_truth_file_exist = False - return all_truth_file_exist - -def check_stat_files(job_dict): - """! 
Check for MET .stat files - - Args: - job_dict - dictionary containing settings - job is running with (strings) - - Returns: - stat_files_exist - if .stat files - exist or not (boolean) - """ - model_stat_file_dir = os.path.join( - job_dict['DATA'], job_dict['VERIF_CASE']+'_'+job_dict['STEP'], - 'METplus_output', job_dict['RUN']+'.'+job_dict['DATE'], - job_dict['MODEL'], job_dict['VERIF_CASE'] - ) - stat_file_list = glob.glob(os.path.join(model_stat_file_dir, '*.stat')) - if len(stat_file_list) != 0: - stat_files_exist = True - else: - stat_files_exist = False - return stat_files_exist - -def get_obs_valid_hrs(obs): - """! This returns the valid hour start, end, and increment - information for a given observation - - Args: - obs - observation name (string) - - Returns: - valid_hr_start - starting valid hour (integer) - valid_hr_end - ending valid hour (integer) - valid_hr_inc - valid hour increment (integer) - """ - obs_valid_hr_dict = { - '24hrCCPA': {'valid_hr_start': 12, - 'valid_hr_end': 12, - 'valid_hr_inc': 24}, - '3hrCCPA': {'valid_hr_start': 0, - 'valid_hr_end': 21, - 'valid_hr_inc': 3}, - '24hrNOHRSC': {'valid_hr_start': 12, - 'valid_hr_end': 12, - 'valid_hr_inc': 24}, - 'OSI-SAF': {'valid_hr_start': 00, - 'valid_hr_end': 00, - 'valid_hr_inc': 24}, - 'GHRSST-MEDIAN': {'valid_hr_start': 00, - 'valid_hr_end': 00, - 'valid_hr_inc': 24}, - 'GET_D': {'valid_hr_start': 00, - 'valid_hr_end': 00, - 'valid_hr_inc': 24}, - } - if obs in list(obs_valid_hr_dict.keys()): - valid_hr_start = obs_valid_hr_dict[obs]['valid_hr_start'] - valid_hr_end = obs_valid_hr_dict[obs]['valid_hr_end'] - valid_hr_inc = obs_valid_hr_dict[obs]['valid_hr_inc'] - else: - print(f"ERROR: Cannot get {obs} valid hour information") - sys.exit(1) - return valid_hr_start, valid_hr_end, valid_hr_inc - -def get_off_machine_data(job_file, job_name, job_output, machine, user, queue, - account): - """! 
This submits a job to the transfer queue - to get data that does not reside on current machine - Args: - job_file - path to job submission file (string) - job_name - job submission name (string) - job_output - path to write job output (string) - machine - machine name (string) - user - user name (string) - queue - submission queue name (string) - account - submission account name (string) - Returns: - """ - # Set up job wall time information - walltime = '60' - walltime_seconds = ( - datetime.timedelta(minutes=int(walltime)).total_seconds() - ) - walltime = (datetime.datetime.min - + datetime.timedelta(minutes=int(walltime))).time() - # Submit job - print("Submitting "+job_file+" to "+queue) - print("Output sent to "+job_output) - os.chmod(job_file, 0o755) - if machine == 'WCOSS2': - os.system('qsub -V -l walltime='+walltime.strftime('%H:%M:%S')+' ' - +'-q '+queue+' -A '+account+' -o '+job_output+' ' - +'-e '+job_output+' -N '+job_name+' ' - +'-l select=1:ncpus=1 '+job_file) - job_check_cmd = ('qselect -s QR -u '+user+' '+'-N ' - +job_name+' | wc -l') - elif machine in ['HERA', 'ORION', 'S4', 'JET']: - os.system('sbatch --ntasks=1 --time=' - +walltime.strftime('%H:%M:%S')+' --partition='+queue+' ' - +'--account='+account+' --output='+job_output+' ' - +'--job-name='+job_name+' '+job_file) - job_check_cmd = ('squeue -u '+user+' -n '+job_name+' ' - +'-t R,PD -h | wc -l') - sleep_counter, sleep_checker = 1, 10 - while (sleep_counter*sleep_checker) <= walltime_seconds: - sleep(sleep_checker) - print("Walltime checker: "+str(sleep_counter*sleep_checker)+" " - +"out of "+str(int(walltime_seconds))+" seconds") - check_job = subprocess.check_output(job_check_cmd, shell=True, - encoding='UTF-8') - if check_job[0] == '0': - break - sleep_counter+=1 - -def initalize_job_env_dict(verif_type, group, - verif_case_step_abbrev_type, job): - """! 
This initializes a dictionary of environment variables and their - values to be set for the job pulling from environment variables - already set previously - Args: - verif_type - string of the use case name - group - string of the group name - verif_case_step_abbrev_type - string of reference name in config - and environment variables - job - string of job name - Returns: - job_env_dict - dictionary of job settings - """ - job_env_var_list = [ - 'machine', 'evs_ver', 'HOMEevs', 'FIXevs', 'USHevs', 'DATA', 'COMROOT', - 'NET', 'RUN', 'VERIF_CASE', 'STEP', 'COMPONENT', 'COMIN', 'evs_run_mode' - ] - if group in ['reformat_data', 'assemble_data', 'generate_stats', 'gather_stats']: - os.environ['MET_TMP_DIR'] = os.path.join( - os.environ['DATA'], - os.environ['VERIF_CASE']+'_'+os.environ['STEP'], - 'METplus_output', 'tmp' - ) - if not os.path.exists(os.environ['MET_TMP_DIR']): - os.makedirs(os.environ['MET_TMP_DIR']) - job_env_var_list.extend( - ['METPLUS_PATH', 'MET_ROOT', 'MET_TMP_DIR', - 'COMROOT'] - ) - elif group == 'plot': - job_env_var_list.extend(['MET_ROOT', 'met_ver']) - job_env_dict = {} - for env_var in job_env_var_list: - job_env_dict[env_var] = os.environ[env_var] - job_env_dict['JOB_GROUP'] = group - if group in ['reformat_data', 'assemble_data', 'generate_stats', 'plot']: - job_env_dict['VERIF_TYPE'] = verif_type - if group == 'plot': - job_env_dict['job_var'] = job - else: - job_env_dict['job_name'] = job - job_env_dict['fhr_start'] = os.environ[ - verif_case_step_abbrev_type+'_fhr_min' - ] - job_env_dict['fhr_end'] = os.environ[ - verif_case_step_abbrev_type+'_fhr_max' - ] - job_env_dict['fhr_inc'] = os.environ[ - verif_case_step_abbrev_type+'_fhr_inc' - ] - if verif_type in ['pres_levs', 'means', 'sfc', 'ptype']: - verif_type_valid_hr_list = ( - os.environ[verif_case_step_abbrev_type+'_valid_hr_list']\ - .split(' ') - ) - job_env_dict['valid_hr_start'] = ( - verif_type_valid_hr_list[0].zfill(2) - ) - job_env_dict['valid_hr_end'] = ( - 
verif_type_valid_hr_list[-1].zfill(2) - ) - if len(verif_type_valid_hr_list) > 1: - verif_type_valid_hr_inc = np.min( - np.diff(np.array(verif_type_valid_hr_list, dtype=int)) - ) - else: - verif_type_valid_hr_inc = 24 - job_env_dict['valid_hr_inc'] = str(verif_type_valid_hr_inc) - else: - if verif_type == 'precip_accum24hr': - valid_hr_start, valid_hr_end, valid_hr_inc = ( - get_obs_valid_hrs('24hrCCPA') - ) - elif verif_type == 'precip_accum3hr': - valid_hr_start, valid_hr_end, valid_hr_inc = ( - get_obs_valid_hrs('3hrCCPA') - ) - elif verif_type == 'snow': - valid_hr_start, valid_hr_end, valid_hr_inc = ( - get_obs_valid_hrs('24hrNOHRSC') - ) - elif verif_type == 'sea_ice': - valid_hr_start, valid_hr_end, valid_hr_inc = ( - get_obs_valid_hrs('OSI-SAF') - ) - elif verif_type == 'sst': - valid_hr_start, valid_hr_end, valid_hr_inc = ( - get_obs_valid_hrs('GHRSST-MEDIAN') - ) - else: - valid_hr_start, valid_hr_end, valid_hr_inc = 12, 12, 23 - job_env_dict['valid_hr_start'] = str(valid_hr_start).zfill(2) - job_env_dict['valid_hr_end'] = str(valid_hr_end).zfill(2) - job_env_dict['valid_hr_inc'] = str(valid_hr_inc) - verif_type_init_hr_list = ( - os.environ[verif_case_step_abbrev_type+'_init_hr_list']\ - .split(' ') - ) - job_env_dict['init_hr_start'] = ( - verif_type_init_hr_list[0].zfill(2) - ) - job_env_dict['init_hr_end'] = ( - verif_type_init_hr_list[-1].zfill(2) - ) - if len(verif_type_init_hr_list) > 1: - verif_type_init_hr_inc = np.min( - np.diff(np.array(verif_type_init_hr_list, dtype=int)) - ) - else: - verif_type_init_hr_inc = 24 - job_env_dict['init_hr_inc'] = str(verif_type_init_hr_inc) - return job_env_dict - -def get_plot_dates(logger, date_type, start_date, end_date, - valid_hr_start, valid_hr_end, valid_hr_inc, - init_hr_start, init_hr_end, init_hr_inc, - forecast_hour): - """! 
This builds the dates to include in plotting based on user - configurations - Args: - logger - logger object - date_type - type of date to plot (string: VALID or INIT) - start_date - plotting start date (string, format: YYYYmmdd) - end_date - plotting end date (string, format: YYYYmmdd) - valid_hr_start - starting valid hour (string) - valid_hr_end - ending valid hour (string) - valid_hr_inc - valid hour increment (string) - init_hr_start - starting initialization hour (string) - init_hr_end - ending initialization hour (string) - init_hr_inc - initialization hour incrrement (string) - forecast_hour - forecast hour (string) - Returns: - valid_dates - array of valid dates (datetime) - init_dates - array of initalization dates (datetime) - """ - # Build date_type date array - if date_type == 'VALID': - start_date_dt = datetime.datetime.strptime(start_date+valid_hr_start, - '%Y%m%d%H') - end_date_dt = datetime.datetime.strptime(end_date+valid_hr_end, - '%Y%m%d%H') - dt_inc = datetime.timedelta(hours=int(valid_hr_inc)) - elif date_type == 'INIT': - start_date_dt = datetime.datetime.strptime(start_date+init_hr_start, - '%Y%m%d%H') - end_date_dt = datetime.datetime.strptime(end_date+init_hr_end, - '%Y%m%d%H') - dt_inc = datetime.timedelta(hours=int(init_hr_inc)) - date_type_dates = (np.arange(start_date_dt, end_date_dt+dt_inc, dt_inc)\ - .astype(datetime.datetime)) - # Build valid and init date arrays - if date_type == 'VALID': - valid_dates = date_type_dates - init_dates = (valid_dates - - datetime.timedelta(hours=(int(forecast_hour)))) - elif date_type == 'INIT': - init_dates = date_type_dates - valid_dates = (init_dates - + datetime.timedelta(hours=(int(forecast_hour)))) - # Check if unrequested hours exist in arrays, and remove - valid_remove_idx_list = [] - valid_hr_list = [ - str(hr).zfill(2) for hr in range(int(valid_hr_start), - int(valid_hr_end)+int(valid_hr_inc), - int(valid_hr_inc)) - ] - for d in range(len(valid_dates)): - if valid_dates[d].strftime('%H') \ - 
not in valid_hr_list: - valid_remove_idx_list.append(d) - valid_dates = np.delete(valid_dates, valid_remove_idx_list) - init_dates = np.delete(init_dates, valid_remove_idx_list) - init_remove_idx_list = [] - init_hr_list = [ - str(hr).zfill(2) for hr in range(int(init_hr_start), - int(init_hr_end)+int(init_hr_inc), - int(init_hr_inc)) - ] - for d in range(len(init_dates)): - if init_dates[d].strftime('%H') \ - not in init_hr_list: - init_remove_idx_list.append(d) - valid_dates = np.delete(valid_dates, init_remove_idx_list) - init_dates = np.delete(init_dates, init_remove_idx_list) - return valid_dates, init_dates - -def get_met_line_type_cols(logger, met_root, met_version, met_line_type): - """! Get the MET columns for a specific line type and MET - verison - - Args: - logger - logger object - met_root - path to MET (string) - met_version - MET version number (string) - met_line_type - MET line type (string) - Returns: - met_version_line_type_col_list - list of MET versoin - line type colums (strings) - """ - if met_version.count('.') == 2: - met_minor_version = met_version.rpartition('.')[0] - elif met_version.count('.') == 1: - met_minor_version = met_version - met_minor_version_col_file = os.path.join( - met_root, 'share', 'met', 'table_files', - 'met_header_columns_V'+met_minor_version+'.txt' - ) - if os.path.exists(met_minor_version_col_file): - with open(met_minor_version_col_file) as f: - for line in f: - if met_line_type in line: - line_type_cols = line.split(' : ')[-1] - break - else: - logger.error(f"{met_minor_version_col_file} DOES NOT EXISTS, " - +"cannot determine MET data column structure") - sys.exit(1) - met_version_line_type_col_list = ( - line_type_cols.replace('\n', '').split(' ') - ) - return met_version_line_type_col_list - -def format_thresh(thresh): - """! 
Format threshold with letter and symbol options - - Args: - thresh - the threshold (string) - - Return: - thresh_symbol - threshold with symbols (string) - thresh_letters - treshold with letters (string) - """ - thresh_symbol = ( - thresh.replace('ge', '>=').replace('gt', '>')\ - .replace('eq', '==').replace('ne', '!=')\ - .replace('le', '<=').replace('lt', '<') - ) - thresh_letter = ( - thresh.replace('>=', 'ge').replace('>', 'gt')\ - .replace('==', 'eq').replace('!=', 'ne')\ - .replace('<=', 'le').replace('<', 'lt') - ) - return thresh_symbol, thresh_letter - -def condense_model_stat_files(logger, input_dir, output_file, model, obs, - grid, vx_mask, fcst_var_name, obs_var_name, - line_type): - """! Condense the individual date model stat file and - thin out unneeded data - - Args: - logger - logger object - input_dir - path to input directory (string) - output_file - path to output file (string) - model - model name (string) - obs - observation name (string) - grid - verification grid (string) - vx_mask - verification masking region (string) - fcst_var_name - forecast variable name (string) - obs_var_name - observation variable name (string) - line_type - MET line type (string) - - Returns: - """ - model_stat_files_wildcard = os.path.join(input_dir, model, model+'_*.stat') - model_stat_files = glob.glob(model_stat_files_wildcard, recursive=True) - if len(model_stat_files) == 0: - logger.warning(f"NO STAT FILES IN MATCHING " - +f"{model_stat_files_wildcard}") - else: - if not os.path.exists(output_file): - logger.debug(f"Condensing down stat files matching " - +f"{model_stat_files_wildcard}") - with open(model_stat_files[0]) as msf: - met_header_cols = msf.readline() - all_grep_output = '' - grep_opts = ( - ' | grep "'+obs+' "' - +' | grep "'+grid+' "' - +' | grep "'+vx_mask+' "' - +' | grep "'+fcst_var_name+' "' - +' | grep "'+obs_var_name+' "' - +' | grep "'+line_type+' "' - ) - for model_stat_file in model_stat_files: - logger.debug(f"Getting data from 
{model_stat_file}") - grep = subprocess.run('grep -R "'+model+'" '+model_stat_file+grep_opts, - shell=True, capture_output=True, encoding="utf8") - logger.debug(f"Ran {ps.args}") - - all_grep_output = all_grep_output+grep.stdout - - logger.debug(f"Condensed {model} .stat file at " - +f"{output_file}") - with open(output_file, 'w') as f: - f.write(met_header_cols+all_grep_output) - -def build_df(logger, input_dir, output_dir, model_info_dict, - met_info_dict, fcst_var_name, fcst_var_level, fcst_var_thresh, - obs_var_name, obs_var_level, obs_var_thresh, line_type, - grid, vx_mask, interp_method, interp_points, date_type, dates, - met_format_valid_dates, fhr): - """! Build the data frame for all model stats, - Read the model parse file, if doesn't exist - parse the model file for need information, and write file - - Args: - logger - logger object - input_dir - path to input directory (string) - output_dir - path to output directory (string) - model_info_dict - model infomation dictionary (strings) - met_info_dict - MET information dictionary (strings) - fcst_var_name - forecast variable name (string) - fcst_var_level - forecast variable level (string) - fcst_var_tresh - forecast variable treshold (string) - obs_var_name - observation variable name (string) - obs_var_level - observation variable level (string) - obs_var_tresh - observation variable treshold (string) - line_type - MET line type (string) - grid - verification grid (string) - vx_mask - verification masking region (string) - interp_method - interpolation method (string) - interp_points - interpolation points (string) - date_type - type of date (string, VALID or INIT) - dates - array of dates (datetime) - met_format_valid_dates - list of valid dates formatted - like they are in MET stat files - fhr - forecast hour (string) - - Returns: - """ - met_version_line_type_col_list = get_met_line_type_cols( - logger, met_info_dict['root'], met_info_dict['version'], line_type - ) - for model_num in 
list(model_info_dict.keys()): - model_num_name = ( - model_num+'/'+model_info_dict[model_num]['name'] - +'/'+model_info_dict[model_num]['plot_name'] - ) - model_num_df_index = pd.MultiIndex.from_product( - [[model_num_name], met_format_valid_dates], - names=['model', 'valid_dates'] - ) - model_dict = model_info_dict[model_num] - condensed_model_file = os.path.join( - input_dir, model_num+'_'+model_dict['name']+'.stat' - ) - if len(dates) != 0: - if not os.path.exists(condensed_model_file): - write_condensed_stat_file = True - else: - write_condensed_stat_file = False - if write_condensed_stat_file: - condense_model_stat_files( - logger, input_dir, condensed_model_file, model_dict['name'], - model_dict['obs_name'], grid, vx_mask, - fcst_var_name, obs_var_name, line_type - ) - parsed_model_stat_file = os.path.join( - output_dir, - 'fcst'+model_dict['name']+'_' - +fcst_var_name+fcst_var_level+fcst_var_thresh+'_' - +'obs'+model_dict['obs_name']+'_' - +obs_var_name+obs_var_level+obs_var_thresh+'_' - +'linetype'+line_type+'_' - +'grid'+grid+'_'+'vxmask'+vx_mask+'_' - +'interp'+interp_method+interp_points+'_' - +date_type.lower() - +dates[0].strftime('%Y%m%d%H%M%S')+'to' - +dates[-1].strftime('%Y%m%d%H%M%S')+'_' - +'fhr'+fhr.zfill(3) - +'.stat' - ) - if not os.path.exists(parsed_model_stat_file): - write_parse_stat_file = True - read_parse_stat_file = True - else: - write_parse_stat_file = False - read_parse_stat_file = True - else: - write_parse_stat_file = False - read_parse_stat_file = False - if os.path.exists(condensed_model_file) and line_type == 'MCTC': - tmp_df = pd.read_csv( - condensed_model_file, sep=" ", skiprows=1, - skipinitialspace=True, - keep_default_na=False, dtype='str', header=None - ) - if len(tmp_df) > 0: - ncat = int(tmp_df[25][0]) - new_met_version_line_type_col_list = [] - for col in met_version_line_type_col_list: - if col == '(N_CAT)': - new_met_version_line_type_col_list.append('N_CAT') - elif col == 'F[0-9]*_O[0-9]*': - fcount = 1 - ocount = 1 
- totcount = 1 - while totcount <= ncat*ncat: - new_met_version_line_type_col_list.append( - 'F'+str(fcount)+'_'+'O'+str(ocount) - ) - if ocount < ncat: - ocount+=1 - elif ocount == ncat: - ocount = 1 - fcount+=1 - totcount+=1 - else: - new_met_version_line_type_col_list.append(col) - met_version_line_type_col_list = ( - new_met_version_line_type_col_list - ) - if write_parse_stat_file: - if fcst_var_thresh != 'NA': - fcst_var_thresh_symbol, fcst_var_thresh_letter = ( - format_thresh(fcst_var_thresh) - ) - else: - fcst_var_thresh_symbol = fcst_var_thresh - fcst_vat_thresh_letter = fcst_var_thresh - if obs_var_thresh != 'NA': - obs_var_thresh_symbol, obs_var_thresh_letter = ( - format_thresh(obs_var_thresh) - ) - else: - obs_var_thresh_symbol = obs_var_thresh - obs_vat_thresh_letter = obs_var_thresh - if os.path.exists(condensed_model_file): - logger.debug(f"Parsing file {condensed_model_file}") - condensed_model_df = pd.read_csv( - condensed_model_file, sep=" ", skiprows=1, - skipinitialspace=True, names=met_version_line_type_col_list, - keep_default_na=False, dtype='str', header=None - ) - parsed_model_df = condensed_model_df[ - (condensed_model_df['MODEL'] == model_dict['name']) - & (condensed_model_df['DESC'] == grid) - & (condensed_model_df['FCST_LEAD'] \ - == fhr.zfill(2)+'0000') - & (condensed_model_df['FCST_VAR'] \ - == fcst_var_name) - & (condensed_model_df['FCST_LEV'] \ - == fcst_var_level) - & (condensed_model_df['OBS_VAR'] \ - == obs_var_name) - & (condensed_model_df['OBS_LEV'] \ - == obs_var_level) - & (condensed_model_df['OBTYPE'] == model_dict['obs_name']) - & (condensed_model_df['VX_MASK'] \ - == vx_mask) - & (condensed_model_df['INTERP_MTHD'] \ - == interp_method) - & (condensed_model_df['INTERP_PNTS'] \ - == interp_points) - & (condensed_model_df['FCST_THRESH'] \ - == fcst_var_thresh_symbol) - & (condensed_model_df['OBS_THRESH'] \ - == obs_var_thresh_symbol) - & (condensed_model_df['LINE_TYPE'] \ - == line_type) - ] - parsed_model_df = 
parsed_model_df[ - parsed_model_df['FCST_VALID_BEG'].isin(met_format_valid_dates) - ] - parsed_model_df['FCST_VALID_BEG'] = pd.to_datetime( - parsed_model_df['FCST_VALID_BEG'], format='%Y%m%d_%H%M%S' - ) - parsed_model_df = parsed_model_df.sort_values(by='FCST_VALID_BEG') - parsed_model_df['FCST_VALID_BEG'] = ( - parsed_model_df['FCST_VALID_BEG'].dt.strftime('%Y%m%d_%H%M%S') - ) - parsed_model_df.to_csv( - parsed_model_stat_file, header=met_version_line_type_col_list, - index=None, sep=' ', mode='w' - ) - if os.path.exists(parsed_model_stat_file): - logger.debug(f"Parsed {model_dict['name']} file " - +f"at {parsed_model_stat_file}") - else: - logger.debug(f"Could not create {parsed_model_stat_file}") - model_num_df = pd.DataFrame(np.nan, index=model_num_df_index, - columns=met_version_line_type_col_list) - if read_parse_stat_file: - if os.path.exists(parsed_model_stat_file): - logger.debug(f"Reading {parsed_model_stat_file} for " - +f"{model_dict['name']}") - model_stat_file_df = pd.read_csv( - parsed_model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, names=met_version_line_type_col_list, - na_values=['NA'], header=None - ) - df_dtype_dict = {} - float_idx = met_version_line_type_col_list.index('TOTAL') - for col in met_version_line_type_col_list: - col_idx = met_version_line_type_col_list.index(col) - if col_idx < float_idx: - df_dtype_dict[col] = str - else: - df_dtype_dict[col] = np.float64 - model_stat_file_df = model_stat_file_df.astype(df_dtype_dict) - for valid_date in met_format_valid_dates: - model_stat_file_df_valid_date_idx_list = ( - model_stat_file_df.index[ - model_stat_file_df['FCST_VALID_BEG'] == valid_date - ] - ).tolist() - if len(model_stat_file_df_valid_date_idx_list) == 0: - logger.debug("No data matching valid date " - +f"{valid_date} in" - +f"{parsed_model_stat_file}") - continue - elif len(model_stat_file_df_valid_date_idx_list) > 1: - logger.debug(f"Multiple lines matching valid date " - +f"{valid_date} in " - 
+f"{parsed_model_stat_file} " - +f"using first one") - else: - logger.debug(f"One line matching valid date " - +f"{valid_date} in " - +f"{parsed_model_stat_file}") - model_num_df.loc[(model_num_name, valid_date)] = ( - model_stat_file_df.loc\ - [model_stat_file_df_valid_date_idx_list[0]]\ - [:] - ) - else: - logger.warning(f"{parsed_model_stat_file} does not exist") - if model_num == 'model1': - all_model_df = model_num_df - else: - all_model_df = pd.concat([all_model_df, model_num_df]) - return all_model_df - -def calculate_stat(logger, data_df, line_type, stat): - """! Calculate the statistic from the data from the - read in MET .stat file(s) - Args: - data_df - dataframe containing the model(s) - information from the MET .stat - files - line_type - MET line type (string) - stat - statistic to calculate (string) - - Returns: - stat_df - dataframe of the statistic - stat_array - array of the statistic - """ - if line_type == 'SL1L2': - FBAR = data_df.loc[:]['FBAR'] - OBAR = data_df.loc[:]['OBAR'] - FOBAR = data_df.loc[:]['FOBAR'] - FFBAR = data_df.loc[:]['FFBAR'] - OOBAR = data_df.loc[:]['OOBAR'] - elif line_type == 'SAL1L2': - FABAR = data_df.loc[:]['FABAR'] - OABAR = data_df.loc[:]['OABAR'] - FOABAR = data_df.loc[:]['FOABAR'] - FFABAR = data_df.loc[:]['FFABAR'] - OOABAR = data_df.loc[:]['OOABAR'] - elif line_type == 'CNT': - FBAR = data_df.loc[:]['FBAR'] - FBAR_NCL = data_df.loc[:]['FBAR_NCL'] - FBAR_NCU = data_df.loc[:]['FBAR_NCU'] - FBAR_BCL = data_df.loc[:]['FBAR_BCL'] - FBAR_BCU = data_df.loc[:]['FBAR_BCU'] - FSTDEV = data_df.loc[:]['FSTDEV'] - FSTDEV_NCL = data_df.loc[:]['FSTDEV_NCL'] - FSTDEV_NCU = data_df.loc[:]['FSTDEV_NCU'] - FSTDEV_BCL = data_df.loc[:]['FSTDEV_BCL'] - FSTDEV_BCU = data_df.loc[:]['FSTDEV_BCU'] - OBAR = data_df.loc[:]['OBAR'] - OBAR_NCL = data_df.loc[:]['OBAR_NCL'] - OBAR_NCU = data_df.loc[:]['OBAR_NCU'] - OBAR_BCL = data_df.loc[:]['OBAR_BCL'] - OBAR_BCU = data_df.loc[:]['OBAR_BCU'] - OSTDEV = data_df.loc[:]['OSTDEV'] - OSTDEV_NCL = 
data_df.loc[:]['OSTDEV_NCL'] - OSTDEV_NCU = data_df.loc[:]['OSTDEV_NCU'] - OSTDEV_BCL = data_df.loc[:]['OSTDEV_BCL'] - OSTDEV_BCU = data_df.loc[:]['OSTDEV_BCU'] - PR_CORR = data_df.loc[:]['PR_CORR'] - PR_CORR_NCL = data_df.loc[:]['PR_CORR_NCL'] - PR_CORR_NCU = data_df.loc[:]['PR_CORR_NCU'] - PR_CORR_BCL = data_df.loc[:]['PR_CORR_BCL'] - PR_CORR_BCU = data_df.loc[:]['PR_CORR_BCU'] - SP_CORR = data_df.loc[:]['SP_CORR'] - KT_CORR = data_df.loc[:]['KT_CORR'] - RANKS = data_df.loc[:]['RANKS'] - FRANKS_TIES = data_df.loc[:]['FRANKS_TIES'] - ORANKS_TIES = data_df.loc[:]['ORANKS_TIES'] - ME = data_df.loc[:]['ME'] - ME_NCL = data_df.loc[:]['ME_NCL'] - ME_NCU = data_df.loc[:]['ME_NCU'] - ME_BCL = data_df.loc[:]['ME_BCL'] - ME_BCU = data_df.loc[:]['ME_BCU'] - ESTDEV = data_df.loc[:]['ESTDEV'] - ESTDEV_NCL = data_df.loc[:]['ESTDEV_NCL'] - ESTDEV_NCU = data_df.loc[:]['ESTDEV_NCU'] - ESTDEV_BCL = data_df.loc[:]['ESTDEV_BCL'] - ESTDEV_BCU = data_df.loc[:]['ESTDEV_BCU'] - MBIAS = data_df.loc[:]['MBIAS'] - MBIAS_BCL = data_df.loc[:]['MBIAS_BCL'] - MBIAS_BCU = data_df.loc[:]['MBIAS_BCU'] - MAE = data_df.loc[:]['MAE'] - MAE_BCL = data_df.loc[:]['MAE_BCL'] - MAE_BCU = data_df.loc[:]['MAE_BCU'] - MSE = data_df.loc[:]['MSE'] - MSE_BCL = data_df.loc[:]['MSE_BCL'] - MSE_BCU = data_df.loc[:]['MSE_BCU'] - BCRMSE = data_df.loc[:]['BCRMSE'] - BCRMSE_BCL = data_df.loc[:]['BCRMSE_BCL'] - BCRMSE_BCU = data_df.loc[:]['BCRMSE_BCU'] - RMSE = data_df.loc[:]['RMSE'] - RMSE_BCL = data_df.loc[:]['RMSE_BCL'] - RMSE_BCU = data_df.loc[:]['RMSE_BCU'] - E10 = data_df.loc[:]['E10'] - E10_BCL = data_df.loc[:]['E10_BCL'] - E10_BCU = data_df.loc[:]['E10_BCU'] - E25 = data_df.loc[:]['E25'] - E25_BCL = data_df.loc[:]['E25_BCL'] - E25_BCU = data_df.loc[:]['E25_BCU'] - E50 = data_df.loc[:]['E50'] - E50_BCL = data_df.loc[:]['E50_BCL'] - E50_BCU = data_df.loc[:]['E50_BCU'] - E75 = data_df.loc[:]['E75'] - E75_BCL = data_df.loc[:]['E75_BCL'] - E75_BCU = data_df.loc[:]['E75_BCU'] - E90 = data_df.loc[:]['E90'] - E90_BCL 
= data_df.loc[:]['E90_BCL'] - E90_BCU = data_df.loc[:]['E90_BCU'] - IQR = data_df.loc[:]['IQR'] - IQR_BCL = data_df.loc[:]['IQR_BCL'] - IQR_BCU = data_df.loc[:]['IQR_BCU'] - MAD = data_df.loc[:]['MAD'] - MAD_BCL = data_df.loc[:]['MAD_BCL'] - MAD_BCU = data_df.loc[:]['MAD_BCU'] - ANOM_CORR_NCL = data_df.loc[:]['ANOM_CORR_NCL'] - ANOM_CORR_NCU = data_df.loc[:]['ANOM_CORR_NCU'] - ANOM_CORR_BCL = data_df.loc[:]['ANOM_CORR_BCL'] - ANOM_CORR_BCU = data_df.loc[:]['ANOM_CORR_BCU'] - ME2 = data_df.loc[:]['ME2'] - ME2_BCL = data_df.loc[:]['ME2_BCL'] - ME2_BCU = data_df.loc[:]['ME2_BCU'] - MSESS = data_df.loc[:]['MSESS'] - MSESS_BCL = data_df.loc[:]['MSESS_BCL'] - MSESS_BCU = data_df.loc[:]['MSESS_BCU'] - RMSFA = data_df.loc[:]['RMSFA'] - RMSFA_BCL = data_df.loc[:]['RMSFA_BCL'] - RMSFA_BCU = data_df.loc[:]['RMSFA_BCU'] - RMSOA = data_df.loc[:]['RMSOA'] - RMSOA_BCL = data_df.loc[:]['RMSOA_BCL'] - RMSOA_BCU = data_df.loc[:]['RMSOA_BCU'] - ANOM_CORR_UNCNTR = data_df.loc[:]['ANOM_CORR_UNCNTR'] - ANOM_CORR_UNCNTR_BCL = data_df.loc[:]['ANOM_CORR_UNCNTR_BCL'] - ANOM_CORR_UNCNTR_BCU = data_df.loc[:]['ANOM_CORR_UNCNTR_BCU'] - SI = data_df.loc[:]['SI'] - SI_BCL = data_df.loc[:]['SI_BCL'] - SI_BCU = data_df.loc[:]['SI_BCU'] - elif line_type == 'GRAD': - FGBAR = data_df.loc[:]['FGBAR'] - OGBAR = data_df.loc[:]['OGBAR'] - MGBAR = data_df.loc[:]['MGBAR'] - EGBAR = data_df.loc[:]['EGBAR'] - S1 = data_df.loc[:]['S1'] - S1_OG = data_df.loc[:]['S1_OG'] - FGOG_RATIO = data_df.loc[:]['FGOG_RATIO'] - DX = data_df.loc[:]['DX'] - DY = data_df.loc[:]['DY'] - elif line_type == 'FHO': - F_RATE = data_df.loc[:]['F_RATE'] - H_RATE = data_df.loc[:]['H_RATE'] - O_RATE = data_df.loc[:]['O_RATE'] - elif line_type in ['CTC', 'NBRCTC']: - FY_OY = data_df.loc[:]['FY_OY'] - FY_ON = data_df.loc[:]['FY_ON'] - FN_OY = data_df.loc[:]['FN_OY'] - FN_ON = data_df.loc[:]['FN_ON'] - if line_type == 'CTC': - EC_VALUE = data_df.loc[:]['EC_VALUE'] - elif line_type in ['CTS', 'NBRCTS']: - BASER = data_df.loc[:]['BASER'] - 
BASER_NCL = data_df.loc[:]['BASER_NCL'] - BASER_NCU = data_df.loc[:]['BASER_NCU'] - BASER_BCL = data_df.loc[:]['BASER_BCL'] - BASER_BCU = data_df.loc[:]['BASER_BCU'] - FMEAN = data_df.loc[:]['FMEAN'] - FMEAN_NCL = data_df.loc[:]['FMEAN_NCL'] - FMEAN_NCU = data_df.loc[:]['FMEAN_NCU'] - FMEAN_BCL = data_df.loc[:]['FMEAN_BCL'] - FMEAN_BCU = data_df.loc[:]['FMEAN_BCU'] - ACC = data_df.loc[:]['ACC'] - ACC_NCL = data_df.loc[:]['ACC_NCL'] - ACC_NCU = data_df.loc[:]['ACC_NCU'] - ACC_BCL = data_df.loc[:]['ACC_BCL'] - ACC_BCU = data_df.loc[:]['ACC_BCU'] - FBIAS = data_df.loc[:]['FBIAS'] - FBIAS_BCL = data_df.loc[:]['FBIAS_BCL'] - FBIAS_BCU = data_df.loc[:]['FBIAS_BCU'] - PODY = data_df.loc[:]['PODY'] - PODY_NCL = data_df.loc[:]['PODY_NCL'] - PODY_NCU = data_df.loc[:]['PODY_NCU'] - PODY_BCL = data_df.loc[:]['PODY_BCL'] - PODY_BCU = data_df.loc[:]['PODY_BCU'] - PODN = data_df.loc[:]['PODN'] - PODN_NCL = data_df.loc[:]['PODN_NCL'] - PODN_NCU = data_df.loc[:]['PODN_NCU'] - PODN_BCL = data_df.loc[:]['PODN_BCL'] - PODN_BCU = data_df.loc[:]['PODN_BCU'] - POFD = data_df.loc[:]['POFD'] - POFD_NCL = data_df.loc[:]['POFD_NCL'] - POFD_NCU = data_df.loc[:]['POFD_NCU'] - POFD_BCL = data_df.loc[:]['POFD_BCL'] - POFD_BCU = data_df.loc[:]['POFD_BCU'] - FAR = data_df.loc[:]['FAR'] - FAR_NCL = data_df.loc[:]['FAR_NCL'] - FAR_NCU = data_df.loc[:]['FAR_NCU'] - FAR_BCL = data_df.loc[:]['FAR_BCL'] - FAR_BCU = data_df.loc[:]['FAR_BCU'] - CSI = data_df.loc[:]['CSI'] - CSI_NCL = data_df.loc[:]['CSI_NCL'] - CSI_NCU = data_df.loc[:]['CSI_NCU'] - CSI_BCL = data_df.loc[:]['CSI_BCL'] - CSI_BCU = data_df.loc[:]['CSI_BCU'] - GSS = data_df.loc[:]['GSS'] - GSS_BCL = data_df.loc[:]['GSS_BCL'] - GSS_BCU = data_df.loc[:]['GSS_BCU'] - HK = data_df.loc[:]['HK'] - HK_NCL = data_df.loc[:]['HK_NCL'] - HK_NCU = data_df.loc[:]['HK_NCU'] - HK_BCL = data_df.loc[:]['HK_BCL'] - HK_BCU = data_df.loc[:]['HK_BCU'] - HSS = data_df.loc[:]['HSS'] - HSS_BCL = data_df.loc[:]['HSS_BCL'] - HSS_BCU = data_df.loc[:]['HSS_BCU'] - ODDS 
= data_df.loc[:]['ODDS'] - ODDS_NCL = data_df.loc[:]['ODDS_NCL'] - ODDS_NCU = data_df.loc[:]['ODDS_NCU'] - ODDS_BCL = data_df.loc[:]['ODDS_BCL'] - ODDS_BCU = data_df.loc[:]['ODDS_BCU'] - LODDS = data_df.loc[:]['LODDS'] - LODDS_NCL = data_df.loc[:]['LODDS_NCL'] - LODDS_NCU = data_df.loc[:]['LODDS_NCU'] - LODDS_BCL = data_df.loc[:]['LODDS_BCL'] - LODDS_BCU = data_df.loc[:]['LODDS_BCU'] - ORSS = data_df.loc[:]['ORSS'] - ORSS_NCL = data_df.loc[:]['ORSS_NCL'] - ORSS_NCU = data_df.loc[:]['ORSS_NCU'] - ORSS_BCL = data_df.loc[:]['ORSS_BCL'] - ORSS_BCU = data_df.loc[:]['ORSS_BCU'] - EDS = data_df.loc[:]['EDS'] - EDS_NCL = data_df.loc[:]['EDS_NCL'] - EDS_NCU = data_df.loc[:]['EDS_NCU'] - EDS_BCL = data_df.loc[:]['EDS_BCL'] - EDS_BCU = data_df.loc[:]['EDS_BCU'] - SEDS = data_df.loc[:]['SEDS'] - SEDS_NCL = data_df.loc[:]['SEDS_NCL'] - SEDS_NCU = data_df.loc[:]['SEDS_NCU'] - SEDS_BCL = data_df.loc[:]['SEDS_BCL'] - SEDS_BCU = data_df.loc[:]['SEDS_BCU'] - EDI = data_df.loc[:]['EDI'] - EDI_NCL = data_df.loc[:]['EDI_NCL'] - EDI_NCU = data_df.loc[:]['EDI_NCU'] - EDI_BCL = data_df.loc[:]['EDI_BCL'] - EDI_BCU = data_df.loc[:]['EDI_BCU'] - SEDI = data_df.loc[:]['SEDI'] - SEDI_NCL = data_df.loc[:]['SEDI_NCL'] - SEDI_NCU = data_df.loc[:]['SEDI_NCU'] - SEDI_BCL = data_df.loc[:]['SEDI_BCL'] - SEDI_BCU = data_df.loc[:]['SEDI_BCU'] - BAGSS = data_df.loc[:]['BAGSS'] - BAGSS_BCL = data_df.loc[:]['BAGSS_BCL'] - BAGSS_BCU = data_df.loc[:]['BAGSS_BCU'] - if line_type == 'CTS': - EC_VALUE = data_df.loc[:]['EC_VALUE'] - elif line_type == 'MCTC': - F1_O1 = data_df.loc[:]['F1_O1'] - elif line_type == 'NBRCNT': - FBS = data_df.loc[:]['FBS'] - FBS_BCL = data_df.loc[:]['FBS_BCL'] - FBS_BCU = data_df.loc[:]['FBS_BCU'] - FSS = data_df.loc[:]['FSS'] - FSS_BCL = data_df.loc[:]['FSS_BCL'] - FSS_BCU = data_df.loc[:]['FSS_BCU'] - AFSS = data_df.loc[:]['AFSS'] - AFSS_BCL = data_df.loc[:]['AFSS_BCL'] - AFSS_BCU = data_df.loc[:]['AFSS_BCU'] - UFSS = data_df.loc[:]['UFSS'] - UFSS_BCL = data_df.loc[:]['UFSS_BCL'] - 
UFSS_BCU = data_df.loc[:]['UFSS_BCU'] - F_RATE = data_df.loc[:]['F_RATE'] - F_RATE_BCL = data_df.loc[:]['F_RATE_BCL'] - F_RATE_BCU = data_df.loc[:]['F_RATE_BCU'] - O_RATE = data_df.loc[:]['O_RATE'] - O_RATE_BCL = data_df.loc[:]['O_RATE_BCL'] - O_RATE_BCU = data_df.loc[:]['O_RATE_BCU'] - elif line_type == 'VL1L2': - UFBAR = data_df.loc[:]['UFBAR'] - VFBAR = data_df.loc[:]['VFBAR'] - UOBAR = data_df.loc[:]['UOBAR'] - VOBAR = data_df.loc[:]['VOBAR'] - UVFOBAR = data_df.loc[:]['UVFOBAR'] - UVFFBAR = data_df.loc[:]['UVFFBAR'] - UVOOBAR = data_df.loc[:]['UVOOBAR'] - elif line_type == 'VAL1L2': - UFABAR = data_df.loc[:]['UFABAR'] - VFABAR = data_df.loc[:]['VFABAR'] - UOABAR = data_df.loc[:]['UOABAR'] - VOABAR = data_df.loc[:]['VOABAR'] - UVFOABAR = data_df.loc[:]['UVFOABAR'] - UVFFABAR = data_df.loc[:]['UVFFABAR'] - UVOOABAR = data_df.loc[:]['UVOOABAR'] - FA_SPEED_BAR = data_df.loc[:]['FA_SPEED_BAR'] - OA_SPEED_BAR = data_df.loc[:]['OA_SPEED_BAR'] - elif line_type == 'VCNT': - FBAR = data_df.loc[:]['FBAR'] - OBAR = data_df.loc[:]['OBAR'] - FS_RMS = data_df.loc[:]['FS_RMS'] - OS_RMS = data_df.loc[:]['OS_RMS'] - MSVE = data_df.loc[:]['MSVE'] - RMSVE = data_df.loc[:]['RMSVE'] - FSTDEV = data_df.loc[:]['FSTDEV'] - OSTDEV = data_df.loc[:]['OSTDEV'] - FDIR = data_df.loc[:]['FDIR'] - ORDIR = data_df.loc[:]['ODIR'] - FBAR_SPEED = data_df.loc[:]['FBAR_SPEED'] - OBAR_SPEED = data_df.loc[:]['OBAR_SPEED'] - VDIFF_SPEED = data_df.loc[:]['VDIFF_SPEED'] - VDIFF_DIR = data_df.loc[:]['VDIFF_DIR'] - SPEED_ERR = data_df.loc[:]['SPEED_ERR'] - SPEED_ABSERR = data_df.loc[:]['SPEED_ABSERR'] - DIR_ERR = data_df.loc[:]['DIR_ERR'] - DIR_ABSERR = data_df.loc[:]['DIR_ABSERR'] - ANOM_CORR = data_df.loc[:]['ANOM_CORR'] - ANOM_CORR_NCL = data_df.loc[:]['ANOM_CORR_NCL'] - ANOM_CORR_NCU = data_df.loc[:]['ANOM_CORR_NCU'] - ANOM_CORR_BCL = data_df.loc[:]['ANOM_CORR_BCL'] - ANOM_CORR_BCU = data_df.loc[:]['ANOM_CORR_BCU'] - ANOM_CORR_UNCNTR = data_df.loc[:]['ANOM_CORR_UNCNTR'] - ANOM_CORR_UNCNTR_BCL = 
data_df.loc[:]['ANOM_CORR_UNCNTR_BCL'] - ANOM_CORR_UNCNTR_BCU = data_df.loc[:]['ANOM_CORR_UNCNTR_BCU'] - if stat == 'ACC': # Anomaly Correlation Coefficient - if line_type == 'SAL1L2': - stat_df = (FOABAR - FABAR*OABAR) \ - /np.sqrt((FFABAR - FABAR*FABAR)* - (OOABAR - OABAR*OABAR)) - elif line_type in ['CNT', 'VCNT']: - stat_df = ANOM_CORR - elif line_type == 'VAL1L2': - stat_df = UVFOABAR/np.sqrt(UVFFABAR*UVOOABAR) - elif stat in ['BIAS', 'ME']: # Bias/Mean Error - if line_type == 'SL1L2': - stat_df = FBAR - OBAR - elif line_type == 'CNT': - stat_df = ME - elif line_type == 'VL1L2': - stat_df = np.sqrt(UVFFBAR) - np.sqrt(UVOOBAR) - elif stat == 'CSI': # Critical Success Index' - if line_type == 'CTC': - stat_df = FY_OY/(FY_OY + FY_ON + FN_OY) - elif stat == 'F1_O1': # Count of forecast category 1 and observation category 1 - if line_type == 'MCTC': - stat_df = F1_O1 - elif stat in ['ETS', 'GSS']: # Equitable Threat Score/Gilbert Skill Score - if line_type == 'CTC': - TOTAL = FY_OY + FY_ON + FN_OY + FN_ON - C = ((FY_OY + FY_ON)*(FY_OY + FN_OY))/TOTAL - stat_df = (FY_OY - C)/(FY_OY + FY_ON + FN_OY - C) - elif line_type == 'CTS': - stat_df = GSS - elif stat == 'FBAR': # Forecast Mean - if line_type == 'SL1L2': - stat_df = FBAR - elif stat == 'FBIAS': # Frequency Bias - if line_type == 'CTC': - stat_df = (FY_OY + FY_ON)/(FY_OY + FN_OY) - elif line_type == 'CTS': - stat_df = FBIAS - elif stat == 'FSS': # Fraction Skill Score - if line_type == 'NBRCNT': - stat_df = FSS - elif stat == 'FY_OY': # Forecast Yes/Obs Yes - if line_type == 'CTC': - stat_df = FY_OY - elif stat == 'HSS': # Heidke Skill Score - if line_type == 'CTC': - TOTAL = FY_OY + FY_ON + FN_OY + FN_ON - CA = (FY_OY+FY_ON)*(FY_OY+FN_OY) - CB = (FN_OY+FN_ON)*(FY_ON+FN_ON) - C = (CA + CB)/TOTAL - stat_df = (FY_OY + FN_ON - C)/(TOTAL - C) - elif stat == 'OBAR': # Observation Mean - if line_type == 'SL1L2': - stat_df = OBAR - elif stat == 'POD': # Probability of Detection - if line_type == 'CTC': - stat_df = 
FY_OY/(FY_OY + FN_OY) - elif stat == 'RMSE': # Root Mean Square Error - if line_type == 'SL1L2': - stat_df = np.sqrt(FFBAR + OOBAR - 2*FOBAR) - elif line_type == 'CNT': - stat_df = RMSE - elif line_type == 'VL1L2': - stat_df = np.sqrt(UVFFBAR + UVOOBAR - 2*UVFOBAR) - elif stat == 'S1': # S1 - if line_type == 'GRAD': - stat_df = S1 - elif stat == 'SRATIO': # Success Ratio - if line_type == 'CTC': - stat_df = 1 - (FY_ON/(FY_ON + FY_OY)) - else: - logger.error(stat+" IS NOT AN OPTION") - sys.exit(1) - idx = 0 - idx_dict = {} - while idx < stat_df.index.nlevels: - idx_dict['index'+str(idx)] = len( - stat_df.index.get_level_values(idx).unique() - ) - idx+=1 - if stat_df.index.nlevels == 1: - stat_array = stat_df.values.reshape( - idx_dict['index0'] - ) - elif stat_df.index.nlevels == 2: - stat_array = stat_df.values.reshape( - idx_dict['index0'], idx_dict['index1'] - ) - return stat_df, stat_array - -def calculate_average(logger, average_method, line_type, stat, df): - """! Calculate average of dataset - - Args: - logger - logger object - average_method - method to use to - calculate the - average (string: - mean, aggregation) - line_type - line type to calculate - stat from - stat - statistic to calculate - (string) - df - dataframe of values - Returns: - """ - average_value = np.nan - if average_method == 'mean': - average_value = np.ma.masked_invalid(df).mean() - elif average_method == 'aggregation': - if not df.isnull().values.all(): - ndays = ( - len(df.loc[:,'TOTAL']) - -np.ma.count_masked(np.ma.masked_invalid(df.loc[:,'TOTAL'])) - ) - avg_df, avg_array = calculate_stat( - logger, df.loc[:,'TOTAL':].agg(['sum'])/ndays, - line_type, stat - ) - average_value = avg_array[0] - else: - logger.warning(f"{average_method} not recongnized..." 
- +"use mean, or aggregation...returning NaN") - return average_value From 164ef55b6cada0d49b674a361e1c6507e4bcc24e Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 4 Dec 2024 19:27:30 +0000 Subject: [PATCH 12/19] Separate valid 00z and 12z in snowfall plot --- .../plots/cam/exevs_refs_snowfall_plots.sh | 104 +++++++++--------- 1 file changed, 54 insertions(+), 50 deletions(-) diff --git a/scripts/plots/cam/exevs_refs_snowfall_plots.sh b/scripts/plots/cam/exevs_refs_snowfall_plots.sh index e5d99905f5..cadabe9760 100755 --- a/scripts/plots/cam/exevs_refs_snowfall_plots.sh +++ b/scripts/plots/cam/exevs_refs_snowfall_plots.sh @@ -111,89 +111,90 @@ for stats in ets_fbias ratio_pod_csi fss ; do if [ $FCST_LEVEL_value = A06 ] ; then export fcst_leads='6,12,18,24,30,36,42,48' - export fcst_valid_hour='0,6,12,18' - valid_rst=00z_06z_12z_18z + export fcst_valid_hours='00 06 12 18' elif [ $FCST_LEVEL_value = A24 ] ; then export fcst_leads='24,30,36,42,48' - export fcst_valid_hour='0,12' - valid_rst=00z_12z + export fcst_valid_hours='00 12' fi - for lead in $fcst_leads ; do level=`echo $FCST_LEVEL_value | tr '[A-Z]' '[a-z]'` for line_type in $line_tp ; do + for fcst_valid_hour in $fcst_valid_hours ; do + #***************************** # Build sub-jobs # **************************** - > run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + > run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh #*********************************************************************************************************************************** # Check if this sub-job has been completed in the previous run for restart - if [ ! -e $restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.completed ] ; then + if [ !
-e $restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.completed ] ; then #*********************************************************************************************************************************** - echo "#!/bin/ksh" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "export PLOT_TYPE=$score_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "export vx_mask_list='$VX_MASK_LIST'" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "export verif_case=$verif_case" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "export verif_type=$verif_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "#!/bin/ksh" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "export PLOT_TYPE=$score_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "export vx_mask_list='$VX_MASK_LIST'" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "export verif_case=$verif_case" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "export verif_type=$verif_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - echo "export log_level=DEBUG" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export log_level=DEBUG" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - echo "export 
eval_period=TEST" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export eval_period=TEST" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh if [ $score_type = valid_hour_average ] ; then - echo "export date_type=INIT" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export date_type=INIT" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh else - echo "export date_type=VALID" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export date_type=VALID" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh fi - echo "export var_name=$VAR" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "export fcts_level=$FCST_LEVEL_value" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "export obs_level=$OBS_LEVEL_value" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export var_name=$VAR" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "export fcts_level=$FCST_LEVEL_value" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "export obs_level=$OBS_LEVEL_value" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - echo "export line_type=$line_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export line_type=$line_type" >> 
run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh if [ $stats = fss ] ; then - echo "export interp=NBRHD_SQUARE" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export interp=NBRHD_SQUARE" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh else - echo "export interp=NEAREST" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export interp=NEAREST" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh fi - echo "export score_py=$score_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "export score_py=$score_type" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh thresh_fcst='>=0.0254, >=0.1016, >=0.2032, >=0.3048' thresh_obs=$thresh_fcst - sed -e "s!model_list!$models!g" -e "s!stat_list!$stat_list!g" -e "s!thresh_fcst!$thresh_fcst!g" -e "s!thresh_obs!$thresh_obs!g" -e "s!fcst_init_hour!$fcst_init_hour!g" -e "s!fcst_valid_hour!$fcst_valid_hour!g" -e "s!fcst_lead!$lead!g" -e "s!interp_pnts!$interp_pnts!g" $USHevs/cam/evs_refs_plots_config.sh > run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + sed -e "s!model_list!$models!g" -e "s!stat_list!$stat_list!g" -e "s!thresh_fcst!$thresh_fcst!g" -e "s!thresh_obs!$thresh_obs!g" -e "s!fcst_init_hour!$fcst_init_hour!g" -e "s!fcst_valid_hour!$fcst_valid_hour!g" -e "s!fcst_lead!$lead!g" -e "s!interp_pnts!$interp_pnts!g" $USHevs/cam/evs_refs_plots_config.sh > run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - chmod +x 
run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + chmod +x run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - echo "${DATA}/run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "${DATA}/run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh #Save for restart - echo "if [ -s ${plot_dir}/${score_type}_regional_${domain}_valid_${valid_rst}_*${var}*.png ] ; then " >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo " cp -v ${plot_dir}/${score_type}_regional_${domain}_valid_${valid_rst}_*${var}*.png $restart" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo " >$restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.completed" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "fi" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh + echo "if [ -s ${plot_dir}/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png ] ; then " >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo " cp -v ${plot_dir}/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png $restart" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo " 
>$restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.completed" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "fi" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - chmod +x run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh - echo "${DATA}/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.sh" >> run_all_poe.sh + chmod +x run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "${DATA}/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh" >> run_all_poe.sh else #Restart from existing png files of previous run - if [ -s $restart/${score_type}_regional_${domain}_valid_${valid_rst}_*${var}*.png ] ; then - cp $restart/${score_type}_regional_${domain}_valid_${valid_rst}_*${var}*.png ${plot_dir}/. + if [ -s $restart/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png ] ; then + cp $restart/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png ${plot_dir}/. 
fi fi + done #end of fcst_valid_hour + done #end of line_type done #end of FCST_LEVEL_value @@ -233,28 +234,29 @@ for stats in ets fbias fss ; do for level in 06h 24h ; do if [ $stats = fss ] ; then if [ $level = 06h ] ; then - valid=valid_00z_06z_12z_18z + valids="00z 06z 12z 18z" lead=width1-3-5-7-9-11_f6-12-18-24-30-36-42-48 elif [ $level = 24h ] ; then - valid=valid_00z_12z + valids="00z 12z" lead=width1-3-5-7-9-11_f24-30-36-42-48 fi else if [ $level = 06h ] ; then - valid=valid_00z_06z_12z_18z + valids="00z 06z 12z 18z" lead=f6-12-18-24-30-36-42-48 elif [ $level = 24h ] ; then - valid=valid_00z_12z + valids="00z 12z" lead=f24-30-36-42-48 fi fi - - for domain in conus conus_east conus_west conus_south conus_central ; do - if [ -s ${score_type}_regional_${domain}_${valid}_${level}_${var}_${stats}_${lead}.png ] ; then - mv ${score_type}_regional_${domain}_${valid}_${level}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid00z12z.buk_${domain}.png - fi + + for valid in $valids ; do + for domain in conus conus_east conus_west conus_south conus_central ; do + if [ -s ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}_${lead}.png ] ; then + mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${stats}_${lead}.png evs.refs.${stats}.${var}_${level}.last${last_days}days.${scoretype}_valid${valid}.buk_${domain}.png + fi + done done - done done done @@ -266,18 +268,20 @@ scoretype='perfdiag' for var in weasd ; do for level in 06h 24h ; do if [ $level = 06h ] ; then - valid=valid_00z_06z_12z_18z + valids="00z 06z 12z 18z" lead=f6-12-18-24-30-36-42-48__ge0.0254ge0.1016ge0.2032ge0.3048 elif [ $level = 24h ] ; then - valid=valid_00z_12z + valids="00z 12z" lead=f24-30-36-42-48__ge0.0254ge0.1016ge0.2032ge0.3048 fi - for domain in conus conus_east conus_west conus_south conus_central ; do - if [ -s ${score_type}_regional_${domain}_${valid}_${level}_${var}_${lead}.png ] ; then - mv 
${score_type}_regional_${domain}_${valid}_${level}_${var}_${lead}.png evs.refs.ctc.${var}_${level}.last${last_days}days.${scoretype}_valid00z12z.buk_${domain}.png + for valid in $valids ; do + for domain in conus conus_east conus_west conus_south conus_central ; do + if [ -s ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${lead}.png ] ; then + mv ${score_type}_regional_${domain}_valid_${valid}_${level}_${var}_${lead}.png evs.refs.ctc.${var}_${level}.last${last_days}days.${scoretype}_valid${valid}.buk_${domain}.png fi - done + done + done done done From 720efdf06b378783e5131c8b82becf96f2448eba Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Fri, 6 Dec 2024 00:16:29 +0000 Subject: [PATCH 13/19] Adjust the required cpu resources --- .../jevs_cam_refs_grid2obs_cape_last31days_plots.sh | 2 +- .../jevs_cam_refs_grid2obs_cape_last90days_plots.sh | 2 +- .../jevs_cam_refs_grid2obs_ctc_last31days_plots.sh | 2 +- .../jevs_cam_refs_grid2obs_ctc_last90days_plots.sh | 2 +- .../jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh | 2 +- .../jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh | 2 +- .../jevs_cam_refs_grid2obs_cape_last31days_plots.ecf | 2 +- .../jevs_cam_refs_grid2obs_cape_last90days_plots.ecf | 2 +- .../jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf | 2 +- .../jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf | 2 +- .../jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf | 2 +- .../jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf | 2 +- scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh | 2 +- scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh | 2 +- scripts/plots/cam/exevs_refs_snowfall_plots.sh | 12 ++++++++---- 15 files changed, 22 insertions(+), 18 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh index d9afe80893..f5310aa595 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh +++ 
b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh index 5819de9014..31a2917d58 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh index 0c866b1843..3c483d91fc 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=82:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh index 4d88804c65..6f257536a9 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=82:mem=100GB +#PBS -l 
place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh index 92f9af1d03..149a6e5004 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=72:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=66:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh index 185a6633e5..3234c4da1d 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=72:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=66:mem=100GB #PBS -l debug=true set -x diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf index cedd850a96..5267f5d423 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf index dacbf8596e..6b3641bcaf 100755 
--- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf index 82c15d514b..3eb43ca151 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf @@ -4,7 +4,7 @@ nPBS -N evs_cam_refs_grid2obs_ctc_last31days_plots #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=82:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf index ca5eecb860..d08932ba25 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=10:ncpus=82:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf index ffff4e3787..7d91335764 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l 
walltime=00:15:00 -#PBS -l place=vscatter:shared,select=1:ncpus=72:mem=100GB +#PBS -l place=vscatter:shared,select=1:ncpus=66:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf index d075e1f860..3c5aa925ad 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=1:ncpus=72:mem=100GB +#PBS -l place=vscatter:shared,select=1:ncpus=66:mem=100GB #PBS -l debug=true export model=evs diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh index 1991297050..92a4f30cd4 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh @@ -302,7 +302,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - mpiexec -np 820 -ppn 82 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh + mpiexec -np 512 -ppn 85 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh index 2b688c5f0a..d0d406e067 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ecnt_plots.sh @@ -222,7 +222,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - mpiexec -np 72 -ppn 72 --cpu-bind verbose,depth cfp 
${DATA}/run_all_poe.sh + mpiexec -np 66 -ppn 66 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi diff --git a/scripts/plots/cam/exevs_refs_snowfall_plots.sh b/scripts/plots/cam/exevs_refs_snowfall_plots.sh index cadabe9760..5cf7a52280 100755 --- a/scripts/plots/cam/exevs_refs_snowfall_plots.sh +++ b/scripts/plots/cam/exevs_refs_snowfall_plots.sh @@ -112,11 +112,14 @@ for stats in ets_fbias ratio_pod_csi fss ; do if [ $FCST_LEVEL_value = A06 ] ; then export fcst_leads='6,12,18,24,30,36,42,48' export fcst_valid_hours='00 06 12 18' + accum=06h elif [ $FCST_LEVEL_value = A24 ] ; then export fcst_leads='24,30,36,42,48' export fcst_valid_hours='00 12' + accum=24h fi + for lead in $fcst_leads ; do level=`echo $FCST_LEVEL_value | tr '[A-Z]' '[a-z]'` @@ -178,8 +181,9 @@ for stats in ets_fbias ratio_pod_csi fss ; do echo "${DATA}/run_py.${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh #Save for restart - echo "if [ -s ${plot_dir}/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png ] ; then " >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh - echo " cp -v ${plot_dir}/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png $restart" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + echo "if [ -s ${plot_dir}/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_${accum}_${var}*.png ] ; then " >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh + #threshold_average_regional_conus_east_valid_12z_24h_weasd_ets_f24-30-36-42-48.png + echo " cp -v ${plot_dir}/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_${accum}_${var}*.png 
$restart" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh echo " >$restart/run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.completed" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh echo "fi" >> run_${stats}.${score_type}.${lead}.${VAR}.${FCST_LEVEL_value}.${line_type}.${VX_MASK_LIST}.${fcst_valid_hour}.sh @@ -188,8 +192,8 @@ for stats in ets_fbias ratio_pod_csi fss ; do else #Restart from existing png files of previous run - if [ -s $restart/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png ] ; then - cp $restart/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_*${var}*.png ${plot_dir}/. + if [ -s $restart/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_${accum}_${var}*.png ] ; then + cp $restart/${score_type}_regional_${domain}_valid_${fcst_valid_hour}z_${accum}_${var}*.png ${plot_dir}/. 
fi fi From acb83f9ee8294d0037c7179c5ea83b26b1305ccc Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Fri, 6 Dec 2024 15:37:13 +0000 Subject: [PATCH 14/19] Adjust ncpus again --- scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh | 2 +- scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh index 0250339f2b..39c1c64907 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh @@ -269,7 +269,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - mpiexec -np 840 -ppn 84 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh + mpiexec -np 768 -ppn 86 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh index 92a4f30cd4..ca139cec34 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh @@ -302,7 +302,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - mpiexec -np 512 -ppn 85 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh + mpiexec -np 512 -ppn 86 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi From 75900390c7820c26c486fe817ebfef63094080b6 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Fri, 6 Dec 2024 23:49:48 +0000 Subject: [PATCH 15/19] Match ncpus for dev and ecf driver scripts --- .../plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh | 2 +- 
.../plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh index f5310aa595..caace6543e 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh index 31a2917d58..149e3e1ad3 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh index 3c483d91fc..d0d005116d 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh +++ 
b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh index 6f257536a9..0e92580323 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB #PBS -l debug=true set -x diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf index 5267f5d423..afa7f93704 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf index 6b3641bcaf..5028e620db 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=84:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB #PBS -l debug=true export 
model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf index 3eb43ca151..e5be8857f3 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf @@ -4,7 +4,7 @@ nPBS -N evs_cam_refs_grid2obs_ctc_last31days_plots #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf index d08932ba25..efbd3fb769 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB #PBS -l debug=true export model=evs From db1301197ab981f1e77ef527d79d2f3b3c3c3e98 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Tue, 10 Dec 2024 18:30:41 +0000 Subject: [PATCH 16/19] Adjust cpu request again --- .../plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf | 2 +- scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh | 2 +- 
scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh index caace6543e..95e6f2edda 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh index 149e3e1ad3..d169d82759 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh index d0d005116d..3c483d91fc 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh 
b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh index 0e92580323..6f257536a9 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true set -x diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf index afa7f93704..b236aed568 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf index 5028e620db..10d6ecc19e 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf index e5be8857f3..3eb43ca151 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf @@ -4,7 +4,7 @@ nPBS -N 
evs_cam_refs_grid2obs_ctc_last31days_plots #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf index efbd3fb769..d08932ba25 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=86:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB #PBS -l debug=true export model=evs diff --git a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh index 39c1c64907..400fbd5d2d 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_cape_plots.sh @@ -269,7 +269,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - mpiexec -np 768 -ppn 86 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh + mpiexec -np 765 -ppn 85 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi diff --git a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh index ca139cec34..1de0cacc34 100755 --- a/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh +++ b/scripts/plots/cam/exevs_refs_grid2obs_ctc_plots.sh @@ -302,7 +302,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - 
mpiexec -np 512 -ppn 86 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh + mpiexec -np 510 -ppn 85 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi From e0c731ccc842d911b89055efa5e17c39c27fe1a9 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Tue, 10 Dec 2024 19:49:13 +0000 Subject: [PATCH 17/19] Adjust precip plot cpu request --- .../scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh | 2 +- .../scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh | 2 +- ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf | 2 +- ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf | 2 +- scripts/plots/cam/exevs_refs_precip_plots.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh index 21b10aef9b..357ddc7fff 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=4:ncpus=78:mem=100GB +#PBS -l place=vscatter,select=4:ncpus=76:mem=100GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh index 190c002d77..f8d84ab3f1 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=4:ncpus=78:mem=200GB +#PBS -l place=vscatter,select=4:ncpus=76:mem=200GB #PBS -l debug=true set -x diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf 
b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf index 4fe2238e76..fd29c90b0f 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=4:ncpus=78:mem=100GB +#PBS -l place=vscatter,select=4:ncpus=76:mem=100GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf index a310d2d21d..581f080274 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_precip_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=4:ncpus=78:mem=200GB +#PBS -l place=vscatter,select=4:ncpus=76:mem=200GB #PBS -l debug=true export model=evs diff --git a/scripts/plots/cam/exevs_refs_precip_plots.sh b/scripts/plots/cam/exevs_refs_precip_plots.sh index bc7e8b798e..e56b66e025 100755 --- a/scripts/plots/cam/exevs_refs_precip_plots.sh +++ b/scripts/plots/cam/exevs_refs_precip_plots.sh @@ -233,7 +233,7 @@ chmod +x run_all_poe.sh # Run the POE script in parallel or in sequence order to generate png files #************************************************************************** if [ $run_mpi = yes ] ; then - mpiexec -np 312 -ppn 78 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh + mpiexec -np 304 -ppn 76 --cpu-bind verbose,depth cfp ${DATA}/run_all_poe.sh else ${DATA}/run_all_poe.sh fi From d95827fd3536791e6321a700b5d8efcd33cb86e3 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 11 Dec 2024 00:37:03 +0000 Subject: [PATCH 18/19] Reduce the requested memory for plotting jobs --- .../plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh 
| 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh | 2 +- .../scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh | 2 +- .../scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh | 2 +- .../plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh index 95e6f2edda..f1f677b8b3 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=40GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh index d169d82759..9e21b5da11 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=40GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh 
b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh index 3c483d91fc..4552b02a84 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=40GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh index 6f257536a9..851c0e4a95 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=40GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh index 149a6e5004..38cc435b6c 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=66:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=66:mem=50GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh index 3234c4da1d..7d5542a96f 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh +++ 
b/dev/drivers/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=66:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=66:mem=50GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh index c96733c810..a40aa08685 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=60:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=60:mem=60GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh index c7751f7547..a7a57bf25d 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=60:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=60:mem=60GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh index b98ebc70ac..4a5598bc10 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=30:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=30:mem=20GB #PBS -l debug=true set -x diff --git 
a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh index b82f987ba8..4a2da6bc2e 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=30:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=30:mem=20GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh index 06a0f687fd..eaf042a9aa 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=6:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=6:mem=5GB #PBS -l debug=true set -x diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh index 034e1ab643..1884fd1936 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:20:00 -#PBS -l place=vscatter,select=1:ncpus=6:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=6:mem=5GB #PBS -l debug=true set -x From da1005893474560a634c5770c208e5efb75687e6 Mon Sep 17 00:00:00 2001 From: "Binbin.Zhou" Date: Wed, 11 Dec 2024 14:14:14 +0000 Subject: [PATCH 19/19] Adjust subscribed memory for plotting jobs in ecf --- .../scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh | 2 +-
.../plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf | 2 +- ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_profile_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_profile_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf | 2 +- .../plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh index a462c07a7b..bee662ebd5 100755 --- a/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh +++ b/dev/drivers/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.sh @@ -4,7 +4,7 @@ #PBS -S /bin/bash #PBS -A VERF-DEV #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=1:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=1:mem=5GB #PBS -l debug=true set -x diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf index b236aed568..b9fab48727 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB +#PBS -l 
place=vscatter,select=9:ncpus=85:mem=40GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf index 10d6ecc19e..8a76ed29e7 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_cape_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=9:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=9:ncpus=85:mem=40GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf index 3eb43ca151..29b0ba99c0 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last31days_plots.ecf @@ -4,7 +4,7 @@ nPBS -N evs_cam_refs_grid2obs_ctc_last31days_plots #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=40GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf index d08932ba25..f09b4014ea 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ctc_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=6:ncpus=85:mem=100GB +#PBS -l place=vscatter,select=6:ncpus=85:mem=40GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf index 
7d91335764..131d5e1706 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=1:ncpus=66:mem=100GB +#PBS -l place=vscatter:shared,select=1:ncpus=66:mem=50GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf index 3c5aa925ad..391a20a780 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_grid2obs_ecnt_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter:shared,select=1:ncpus=66:mem=100GB +#PBS -l place=vscatter:shared,select=1:ncpus=66:mem=50GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf index 6ca3ce39b5..b2b79d53ea 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_precip_spatial_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=1:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=1:mem=5GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf index 5718a0c543..c091c0d388 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=60:mem=100GB 
+#PBS -l place=vscatter,select=1:ncpus=60:mem=60GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf index d84c9a2afc..a49c071acb 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_profile_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=60:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=60:mem=60GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf index 813bf92893..e7a5f0fa79 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=30:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=30:mem=20GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf index 367687a7c7..f986d25671 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_snowfall_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:15:00 -#PBS -l place=vscatter,select=1:ncpus=30:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=30:mem=20GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf index 484fd4024f..b615cb4b15 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf 
+++ b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last31days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:10:00 -#PBS -l place=vscatter,select=1:ncpus=6:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=6:mem=5GB #PBS -l debug=true export model=evs diff --git a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf index 938d17a105..28c7b8c94c 100755 --- a/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf +++ b/ecf/scripts/plots/cam/jevs_cam_refs_spcoutlook_last90days_plots.ecf @@ -4,7 +4,7 @@ #PBS -q %QUEUE% #PBS -A %PROJ%-%PROJENVIR% #PBS -l walltime=00:20:00 -#PBS -l place=vscatter,select=1:ncpus=6:mem=100GB +#PBS -l place=vscatter,select=1:ncpus=6:mem=5GB #PBS -l debug=true export model=evs