diff --git a/antarctica_today/__init__.py b/antarctica_today/__init__.py index e869860..624a501 100644 --- a/antarctica_today/__init__.py +++ b/antarctica_today/__init__.py @@ -1,4 +1,5 @@ import os +import sys # IMPORTANT: If we don't specify this setting, then the projection we want to use will # be replaced with another (and this warning will be printed)! @@ -10,3 +11,11 @@ # used instead. To use the original CRS, set the OSR_USE_NON_DEPRECATED configuration # option to NO. os.environ["OSR_USE_NON_DEPRECATED"] = "NO" + + +# Ignore warnings by default, while still allowing users to change the behavior, e.g. by +# upgrading them to exceptions. +if not sys.warnoptions: + import warnings + + warnings.simplefilter("ignore") diff --git a/antarctica_today/compute_mean_climatology.py b/antarctica_today/compute_mean_climatology.py index bc6957c..53a01f2 100644 --- a/antarctica_today/compute_mean_climatology.py +++ b/antarctica_today/compute_mean_climatology.py @@ -6,6 +6,7 @@ import numpy import pandas +from loguru import logger from osgeo import gdal from antarctica_today.melt_array_picklefile import ( @@ -33,7 +34,6 @@ def compute_daily_climatology_pixel_averages( baseline_end_year: int = 2020, melt_end_mmdd: Tuple[int, int] = (4, 30), output_picklefile: Path = daily_melt_averages_picklefile, - verbose: bool = True, ) -> Tuple[numpy.ndarray, Dict[Tuple[int, int], int]]: """Compute fraction of days in the baseline period in which each give pixel melts. @@ -171,8 +171,7 @@ def compute_daily_climatology_pixel_averages( with open(output_picklefile, "wb") as f: pickle.dump((average_melt_array, baseline_dates_mmdd_dict), f) - if verbose: - print(output_picklefile, "written.") + logger.debug(f"{output_picklefile} written.") return average_melt_array, baseline_dates_mmdd_dict @@ -180,21 +179,19 @@ def compute_daily_climatology_pixel_averages( def read_daily_melt_averages_picklefile( build_picklefile_if_not_present: bool = True, daily_climatology_picklefile: Path = daily_melt_averages_picklefile, - verbose: bool = True, ): """Read the daily climatology averages picklefile.""" if not os.path.exists(daily_climatology_picklefile): if build_picklefile_if_not_present: return compute_daily_climatology_pixel_averages( - output_picklefile=daily_climatology_picklefile, verbose=verbose + output_picklefile=daily_climatology_picklefile, ) else: raise FileNotFoundError( "Picklefile '{0}' not found.".format(daily_climatology_picklefile) ) - if verbose: - print("Reading", daily_climatology_picklefile) + logger.debug(f"Reading {daily_climatology_picklefile}") with open(daily_climatology_picklefile, "rb") as f: array, dt_dict = pickle.load(f) @@ -205,7 +202,6 @@ def read_daily_melt_averages_picklefile( def compute_daily_sum_pixel_averages( daily_picklefile: Path = daily_melt_averages_picklefile, sum_picklefile: Path = daily_cumulative_melt_averages_picklefile, - verbose: bool = True, ) -> None: """Compute a mean daily cumulative melt-day value for each pixel throughout the melt season. @@ -219,7 +215,7 @@ def compute_daily_sum_pixel_averages( directly comparable to a given daily-sum value during the melt season. """ # First, read the daily melt value picklefile. 
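For orientation, the `read_*_picklefile` helpers touched throughout this module all return the same two-part payload: a `(rows, cols, time)` numpy array plus a dict mapping a date key to its index along the time axis (the daily-averages file keys its dict by `(month, day)` tuples). A minimal sketch of consuming one; the path here is a stand-in for the module's `daily_melt_averages_picklefile` default:

```python
import pickle

# Hypothetical path; the real default comes from this package's file constants.
with open("daily_melt_averages.pickle", "rb") as f:
    daily_array, dt_dict = pickle.load(f)  # (rows x cols x days, {(month, day): index})

dec_25 = daily_array[:, :, dt_dict[(12, 25)]]  # mean-melt grid for December 25
```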
- daily_array, dt_dict = read_daily_melt_averages_picklefile(verbose=verbose) + daily_array, dt_dict = read_daily_melt_averages_picklefile() daily_sum_array: numpy.ndarray = numpy.zeros(daily_array.shape, dtype=numpy.int32) for dt in dt_dict: daily_sum_array[:, :, dt_dict[dt]] = numpy.array( @@ -229,34 +225,28 @@ def compute_daily_sum_pixel_averages( dtype=numpy.int32, ) - if verbose: - print("Writing", sum_picklefile, end="...") - + logger.debug(f"Writing {sum_picklefile} ...") with open(sum_picklefile, "wb") as f: pickle.dump((daily_sum_array, dt_dict), f) - - if verbose: - print("Done.") + logger.debug("Done.") def read_daily_sum_melt_averages_picklefile( build_picklefile_if_not_present: bool = True, daily_sum_picklefile: Path = daily_cumulative_melt_averages_picklefile, - verbose: bool = True, ): """Read the daily climatology averages picklefile.""" if not os.path.exists(daily_sum_picklefile): if build_picklefile_if_not_present: return compute_daily_climatology_pixel_averages( - output_picklefile=daily_sum_picklefile, verbose=verbose + output_picklefile=daily_sum_picklefile, ) else: raise FileNotFoundError( "Picklefile '{0}' not found.".format(daily_sum_picklefile) ) - if verbose: - print("Reading", daily_sum_picklefile) + logger.debug(f"Reading {daily_sum_picklefile}") f = open(daily_sum_picklefile, "rb") array, dt_dict = pickle.load(f) f.close() @@ -271,7 +261,6 @@ def create_baseline_climatology_tif( f_out_std: str = std_climatology_geotiff, round_to_integers: bool = True, gap_filled: bool = True, - verbose: bool = True, ) -> numpy.ndarray: """Generate a "mean annual melt" map over the baseline period. @@ -281,15 +270,15 @@ def create_baseline_climatology_tif( """ # Read the gridded satellite data if gap_filled: - model_array, datetimes_dict = read_gap_filled_melt_picklefile(verbose=verbose) + model_array, datetimes_dict = read_gap_filled_melt_picklefile() else: model_array, datetimes_dict = read_model_array_picklefile( - resample_melt_codes=True, verbose=verbose + resample_melt_codes=True, ) datetimes = list(datetimes_dict.keys()) num_years = int((end_date - start_date).days / 365.25) - # print(num_years) + # logger.info(num_years) annual_sum_grids: numpy.ndarray = numpy.empty( model_array.shape[0:2] + (num_years,), dtype=int @@ -339,8 +328,8 @@ def create_baseline_climatology_tif( f_std_base, f_std_ext = os.path.splitext(f_out_std) f_out_std = f_std_base + "_gap_filled" + f_std_ext - output_gtif(annual_mean_array, f_out_mean, nodata=-1, verbose=verbose) - output_gtif(annual_std_array, f_out_std, nodata=-1, verbose=verbose) + output_gtif(annual_mean_array, f_out_mean, nodata=-1) + output_gtif(annual_std_array, f_out_std, nodata=-1) return annual_mean_array @@ -349,7 +338,6 @@ def create_partial_year_melt_anomaly_tif( current_datetime: Optional[datetime.datetime] = None, dest_fname: Optional[str] = None, gap_filled: bool = True, - verbose: bool = True, ) -> numpy.ndarray: """Create a tif of melt anomaly compared to baseline climatology for that day of the melt season.""" # If no datetime is given, use "today" @@ -382,13 +370,13 @@ def create_partial_year_melt_anomaly_tif( day=first_mmdd_of_melt_season[1], ) - # print(current_datetime) - # print(first_dt_of_present_melt_season) + # logger.info(current_datetime) + # logger.info(first_dt_of_present_melt_season) if gap_filled: - melt_array, dt_dict = read_gap_filled_melt_picklefile(verbose=verbose) + melt_array, dt_dict = read_gap_filled_melt_picklefile() else: melt_array, dt_dict = read_model_array_picklefile( - 
resample_melt_codes=True, verbose=verbose + resample_melt_codes=True, ) dt_list = sorted(list(dt_dict.keys())) @@ -403,7 +391,7 @@ def create_partial_year_melt_anomaly_tif( # If we don't have days in the picklefile up to the current date, readjust the date and inform the user. if dts_masked[-1] < current_datetime: - print( + logger.info( "{0} not in the melt files. Adjusting to last known date: {1}".format( current_datetime.strftime("%Y-%m-%d"), dts_masked[-1].strftime("%Y-%m-%d"), @@ -445,7 +433,7 @@ def create_partial_year_melt_anomaly_tif( ), ) - output_gtif(anomalies_int, dest_fname, nodata=-999, verbose=verbose) + output_gtif(anomalies_int, dest_fname, nodata=-999) return anomalies_int @@ -455,7 +443,6 @@ def create_annual_melt_anomaly_tif( year_melt_tif: Optional[str] = None, baseline_melt_tif: Optional[str] = None, gap_filled: bool = True, - verbose: bool = True, ): """Create a tif of annual melt anomaly compared to baseline climatology. @@ -485,13 +472,16 @@ def create_annual_melt_anomaly_tif( anomaly_array[ice_mask == 0] = -999 - output_gtif(anomaly_array, dest_fname, verbose=verbose, nodata=-999) + output_gtif(anomaly_array, dest_fname, nodata=-999) return anomaly_array def read_annual_melt_anomaly_tif( - year, anomaly_tif=None, gap_filled=True, generate_if_nonexistent=True, verbose=True + year, + anomaly_tif=None, + gap_filled=True, + generate_if_nonexistent=True, ): """Read the annual anomaly tif.""" if anomaly_tif is None: @@ -505,12 +495,12 @@ def read_annual_melt_anomaly_tif( if not os.path.exists(anomaly_tif): array = create_annual_melt_anomaly_tif( - year=year, gap_filled=gap_filled, verbose=verbose + year=year, + gap_filled=gap_filled, ) return array - if verbose: - print("Reading", anomaly_tif) + logger.debug(f"Reading {anomaly_tif}") ds = gdal.Open(str(anomaly_tif), gdal.GA_ReadOnly) if ds is None: @@ -575,7 +565,6 @@ def create_annual_melt_sum_tif( melt_start_mmdd: Tuple[int, int] = (10, 1), melt_end_mmdd: Tuple[int, int] = (4, 30), gap_filled: bool = True, - verbose: bool = True, ) -> Optional[numpy.ndarray]: """Create an integer tif file of that year's annual sum of melt-days, per pixel. @@ -585,10 +574,10 @@ def create_annual_melt_sum_tif( changing that. 
""" if gap_filled: - melt_array, datetimes_dict = read_gap_filled_melt_picklefile(verbose=verbose) + melt_array, datetimes_dict = read_gap_filled_melt_picklefile() else: melt_array, datetimes_dict = read_model_array_picklefile( - resample_melt_codes=True, verbose=verbose + resample_melt_codes=True, ) dt_list = list(datetimes_dict.keys()) @@ -649,7 +638,7 @@ def create_annual_melt_sum_tif( base, ext = os.path.splitext(output_fname) output_fname = base + "_gap_filled" + ext - output_gtif(melt_array_year, output_fname, nodata=-1, verbose=verbose) + output_gtif(melt_array_year, output_fname, nodata=-1) return melt_array_year @@ -661,7 +650,6 @@ def save_climatologies_as_CSV( doy_start=(10, 1), doy_end=(4, 30), gap_filled=True, - verbose=True, ): """Compute the percentiles of climatologies and save them as a pandas dataframe for later use.""" baseline_melt_percentiles_for_each_basin = _generate_baseline_melt_climatology( @@ -671,7 +659,6 @@ def save_climatologies_as_CSV( doy_end=doy_end, include_regional_totals=True, gap_filled=gap_filled, - verbose=verbose, ) assert len(baseline_melt_percentiles_for_each_basin) == len(antarctic_regions_dict) @@ -758,8 +745,7 @@ def save_climatologies_as_CSV( with open(csv_file, "w") as f: f.write(text_all) - if verbose: - print(csv_file, "written.") + logger.debug(f"Wrote {csv_file}") return @@ -771,7 +757,6 @@ def _generate_baseline_melt_climatology( doy_end=(4, 30), # (MM,DD) include_regional_totals=True, gap_filled=True, - verbose=True, ): """Generate the data for a climatology plot. @@ -791,13 +776,12 @@ def _generate_baseline_melt_climatology( If True, plot each of the sub-regions as well. """ if gap_filled: - melt_array, datetime_dict = read_gap_filled_melt_picklefile(verbose=verbose) + melt_array, datetime_dict = read_gap_filled_melt_picklefile() else: melt_array, datetime_dict = read_model_array_picklefile( fill_pole_hole=True, resample_melt_codes=True, resample_melt_code_threshold=4, - verbose=verbose, ) ice_mask = get_ice_mask_array() @@ -839,15 +823,15 @@ def _generate_baseline_melt_climatology( def open_baseline_climatology_csv_as_dataframe( - csv_file=baseline_percentiles_csv, gap_filled=True, verbose=True + csv_file=baseline_percentiles_csv, + gap_filled=True, ): """Open the dataframe for the baseline period climatology percentiles, and return a pandas dataframe.""" if gap_filled and os.path.split(csv_file)[1].find("gap_filled") == -1: base, ext = os.path.splitext(csv_file) csv_file = base + "_gap_filled" + ext - if verbose: - print("Reading", csv_file) + logger.debug(f"Reading {csv_file}") return pandas.read_csv(csv_file, header=19) @@ -871,8 +855,8 @@ def _get_regional_tif_masks(tifname=antarctic_regions_tif): try: assert numpy.all((ice_mask > 0) == (region_array > 0)) except AssertionError as e: - print("ice_mask:", numpy.count_nonzero(ice_mask > 0)) - print("regions: ", numpy.count_nonzero(region_array > 0)) + logger.error(f"ice_mask: {numpy.count_nonzero(ice_mask > 0)}") + logger.error("regions: {numpy.count_nonzero(region_array > 0)}") raise e output_mask_dict = {} @@ -1062,15 +1046,15 @@ def _compute_baseline_climatology_lists( def read_daily_melt_numbers_as_dataframe( - csv_file=daily_melt_csv, gap_filled=True, verbose=True + csv_file=daily_melt_csv, + gap_filled=True, ): """Read the daily melt files, return a Pandas dataframe.""" if gap_filled and os.path.split(csv_file)[1].find("gap_filled") == -1: base, ext = os.path.splitext(csv_file) csv_file = base + "_gap_filled" + ext - if verbose: - print("Reading", csv_file) + 
logger.debug(f"Reading {csv_file}") # Read the dataframe. Convert the "date" field to a date object. df = pandas.read_csv(csv_file, header=18, converters={"date": pandas.to_datetime}) @@ -1079,7 +1063,8 @@ def read_daily_melt_numbers_as_dataframe( def save_daily_melt_numbers_to_csv( - csv_file=daily_melt_csv, gap_filled=True, verbose=True + csv_file=daily_melt_csv, + gap_filled=True, ): """Compute climatologies for all regions on every day of the dataset, save to a .csv file.""" text_lines = [ @@ -1124,14 +1109,13 @@ def save_daily_melt_numbers_to_csv( text_lines.append(csv_fields_line) if gap_filled: - melt_array, datetime_dict = read_gap_filled_melt_picklefile(verbose=False) + melt_array, datetime_dict = read_gap_filled_melt_picklefile() else: melt_array, datetime_dict = read_model_array_picklefile( fill_pole_hole=True, filter_out_error_swaths=True, resample_melt_codes=True, resample_melt_code_threshold=4, - verbose=False, ) ice_mask = get_ice_mask_array() @@ -1208,8 +1192,7 @@ def save_daily_melt_numbers_to_csv( with open(csv_file, "w") as f: f.write(text_all) - if verbose: - print(csv_file, "written.") + logger.info(f"Wrote {csv_file}") if __name__ == "__main__": diff --git a/antarctica_today/generate_antarctica_today_map.py b/antarctica_today/generate_antarctica_today_map.py index e56a4c8..dceede0 100644 --- a/antarctica_today/generate_antarctica_today_map.py +++ b/antarctica_today/generate_antarctica_today_map.py @@ -20,6 +20,7 @@ import matplotlib.pyplot as plt import numpy import PIL +from loguru import logger from osgeo import gdal from antarctica_today import read_NSIDC_bin_file, write_NSIDC_bin_to_gtif @@ -53,7 +54,8 @@ def main(): """Do stuff I want to do here.""" m = AT_map_generator( - fill_pole_hole=False, filter_out_error_swaths=True, verbose=True + fill_pole_hole=False, + filter_out_error_swaths=True, ) for region in [ @@ -207,13 +209,6 @@ def read_and_parse_args(): default=False, help="Omit the legend. Default if not set: include a legend.", ) - parser.add_argument( - "--verbose", - "-v", - action="store_true", - default=False, - help="Increase output verbosity.", - ) return parser.parse_args() @@ -230,7 +225,6 @@ def __init__( melt_array_picklefile=model_results_picklefile, fill_pole_hole=True, filter_out_error_swaths=True, - verbose=True, ): """Initialize the class.""" self.melt_array_picklefile = melt_array_picklefile @@ -240,7 +234,6 @@ def __init__( # Options for reading and/or gap-filling the data. self.OPT_fill_pole_hole = fill_pole_hole self.OPT_filter_out_error_swaths = filter_out_error_swaths - self.OPT_verbose = verbose # Containers to store the pickled data for each basemap figure and axes object. # Can generate these once and save them to a picklefile, both on disk and @@ -288,7 +281,6 @@ def get_melt_array_picklefile_and_datetimes(self): ) = read_model_array_picklefile( fill_pole_hole=self.OPT_fill_pole_hole, filter_out_error_swaths=self.OPT_filter_out_error_swaths, - verbose=self.OPT_verbose, ) return self.cached_melt_array, self.cached_datetime_dict @@ -330,7 +322,6 @@ def _read_melt_array_and_datetimes(self): picklefile=self.melt_array_picklefile, fill_pole_hole=self.OPT_fill_pole_hole, filter_out_error_swaths=self.OPT_filter_out_error_swaths, - verbose=self.OPT_verbose, ) return self.melt_array, self.datetimes_dict @@ -452,8 +443,7 @@ def _get_mountains_geodataframe(self): Use the cached version if already read. 
""" if self.mountains_df is None: - if self.OPT_verbose: - print("Reading", mountains_shapefile_path) + logger.debug(f"Reading {mountains_shapefile_path}") self.mountains_df = geopandas.read_file( mountains_shapefile_path, crs=self.SPS_projection.proj4_init ) @@ -561,8 +551,7 @@ def _generate_new_baseline_map_figure( f = open(fname, "wb") pickle.dump(fig, f) f.close() - if self.OPT_verbose: - print(fname, "written.") + logger.debug(f"Wrote {fname}") return fig, ax @@ -579,8 +568,7 @@ def _read_baseline_map_picklefile(self, map_type="daily", region_number=0): if not os.path.exists(fname): return None, None - if self.OPT_verbose: - print("Reading", fname) + logger.debug(f"Reading {fname}") # Read the picklefile f = open(fname, "rb") @@ -898,8 +886,7 @@ def _strip_empty_image_border(self, filename): return # svgclip.py isn't working... can't seem to resolve the Rsvg namespace. # svgclip.clip(filename, filename, margin=0) - # if self.OPT_verbose: - # print(filename, "trimmed.") + # logger.debug(f"Trimmed {filename}") else: bg = PIL.Image.new(im.mode, im.size, im.getpixel((0, 0))) @@ -909,8 +896,7 @@ def _strip_empty_image_border(self, filename): if bbox: im2 = im.crop(bbox) im2.save(filename) - if self.OPT_verbose: - print(filename, "trimmed.") + logger.debug(f"Trimmed {filename}") return @@ -1413,9 +1399,9 @@ def plot_test_image(self, infile, outfile): import numpy - print(numpy.unique(data_array)) + logger.info(numpy.unique(data_array)) for i in range(-1, 8 + 1): - print(i, numpy.count_nonzero(data_array == i)) + logger.info(f"{i} {numpy.count_nonzero(data_array == i)}") # Plot the data ax.pcolormesh( @@ -1436,7 +1422,7 @@ def plot_test_image(self, infile, outfile): fig.savefig(outfile, dpi=150, format="eps") else: fig.savefig(outfile, dpi=150) - print(outfile, "written.") + logger.info(f"Wrote {outfile}") self._strip_empty_image_border(outfile) @@ -1517,8 +1503,7 @@ def generate_daily_melt_map( infile = os.path.join(model_results_dir, max(os.listdir(model_results_dir))) # 1: Read the data file into an array. - if self.OPT_verbose: - print("Reading", infile) + logger.debug(f"Reading {infile}") infile_ext = os.path.splitext(infile)[-1].lower() # If it's a GeoTiff @@ -1597,8 +1582,7 @@ def generate_daily_melt_map( else: fig.savefig(outfile, dpi=new_dpi) - if self.OPT_verbose: - print(outfile, "written.") + logger.debug(f"Wrote {outfile}") self._strip_empty_image_border(outfile) @@ -1773,8 +1757,7 @@ def generate_annual_melt_map( else: fig.savefig(outfile_fname, dpi=new_dpi) - if self.OPT_verbose: - print(outfile_fname, "written.") + logger.debug(f"Wrote {outfile_fname}") self._strip_empty_image_border(outfile_fname) @@ -1800,7 +1783,6 @@ def generate_anomaly_melt_map( keep_year_label_wrapped=True, reset_picklefile=False, message_below_year="relative to 1990-2020", - verbose=True, ): """Generate a cumulative annual anomaly melt map compared to the baseline climatology period. @@ -1874,7 +1856,7 @@ def generate_anomaly_melt_map( if mmdd_of_year is None: # Just get the annual anomlay map for that year. 
- anomaly_data = read_annual_melt_anomaly_tif(year=year, verbose=verbose) + anomaly_data = read_annual_melt_anomaly_tif(year=year) else: datetime_this_year = datetime.datetime( year=year @@ -1885,7 +1867,6 @@ def generate_anomaly_melt_map( anomaly_data = create_partial_year_melt_anomaly_tif( current_datetime=datetime_this_year, gap_filled=False, - verbose=verbose, ) if anomaly_data is None: @@ -1952,8 +1933,7 @@ def generate_anomaly_melt_map( else: fig.savefig(outfile_fname, dpi=new_dpi) - if self.OPT_verbose: - print(outfile_fname, "written.") + logger.debug(f"Wrote {outfile_fname}") self._strip_empty_image_border(outfile_fname) @@ -1977,7 +1957,6 @@ def generate_latest_partial_anomaly_melt_map( keep_year_label_wrapped=True, reset_picklefile=False, message_below_year=None, - verbose=True, ): """Same as generate_anomaly_melt_map, but do it for only a partial year, up until the last day of data that we have in the melt array. @@ -2009,7 +1988,6 @@ def generate_latest_partial_anomaly_melt_map( keep_year_label_wrapped=keep_year_label_wrapped, reset_picklefile=reset_picklefile, message_below_year=message_below_year, - verbose=verbose, ) @@ -2025,7 +2003,7 @@ def SPECIAL_make_map_with_borders(year=2020): DATA_QGIS_DIR / "basins " / "Antarctic_Regions_v2_interior_borders.shp" ) - at = AT_map_generator(fill_pole_hole=False, verbose=True) + at = AT_map_generator(fill_pole_hole=False) for fmt in ("png", "svg"): # for fmt in ("png",): fname = os.path.join( @@ -2137,7 +2115,7 @@ def SPECIAL_make_map_with_borders(year=2020): fig.savefig(fname, dpi=dpi) at._strip_empty_image_border(fname) - print(fname, "overwritten.") + logger.info(f"Overwrote {fname}") if __name__ == "__main__": diff --git a/antarctica_today/generate_daily_melt_file.py b/antarctica_today/generate_daily_melt_file.py index 51d82fd..5e60a96 100644 --- a/antarctica_today/generate_daily_melt_file.py +++ b/antarctica_today/generate_daily_melt_file.py @@ -21,6 +21,7 @@ import numpy import xarray +from loguru import logger from antarctica_today import tb_file_data, write_NSIDC_bin_to_gtif from antarctica_today.melt_array_picklefile import get_ice_mask_array @@ -91,11 +92,13 @@ def generate_new_daily_melt_files( # Make sure there's at least one of each file (i.e. exactly one). If not, just skip & continue if len(nsidc_fps) == 0: if warn_if_missing_files: - warnings.warn( + msg = ( "Warning: At least one NSIDC Tb file on date '" + dt.strftime("%Y%m%d") + "' is missing. Skipping that date." ) + logger.warning(msg) + warnings.warn(msg) continue threshold_file = get_correct_threshold_file(dt) @@ -119,7 +122,6 @@ def create_daily_melt_file( output_bin_filename, output_gtif_filename=None, Tb_nodata_value=-999, - verbose=True, ) -> numpy.ndarray: """Read input files and generate a daily melt file. 
Primary function.""" output_array = read_files_and_generate_melt_array( @@ -131,7 +133,10 @@ def create_daily_melt_file( # Write the output .bin file # write_flat_binary.write_array_to_binary( write_array_to_binary( - output_array, output_bin_filename, numbytes=2, signed=True, verbose=verbose + output_array, + output_bin_filename, + numbytes=2, + signed=True, ) # Write the output.tif file, if called for @@ -142,7 +147,6 @@ def create_daily_melt_file( resolution=25, hemisphere="S", nodata=None, - verbose=verbose, ) return output_array diff --git a/antarctica_today/generate_gap_filled_melt_picklefile.py b/antarctica_today/generate_gap_filled_melt_picklefile.py index 673fca0..a6b38fb 100644 --- a/antarctica_today/generate_gap_filled_melt_picklefile.py +++ b/antarctica_today/generate_gap_filled_melt_picklefile.py @@ -7,6 +7,7 @@ import pickle import numpy +from loguru import logger from antarctica_today.compute_mean_climatology import ( read_daily_melt_averages_picklefile, @@ -15,19 +16,18 @@ from antarctica_today.tb_file_data import gap_filled_melt_picklefile -def save_gap_filled_picklefile(picklefile=gap_filled_melt_picklefile, verbose=True): +def save_gap_filled_picklefile(picklefile=gap_filled_melt_picklefile): """Write the picklefile.""" - array, datetimes_dict = fill_melt_array_with_interpolations(verbose=verbose) + array, datetimes_dict = fill_melt_array_with_interpolations() picklefile.parent.mkdir(parents=True, exist_ok=True) f = open(picklefile, "wb") pickle.dump((array, datetimes_dict), f) f.close() - if verbose: - print(picklefile, "written.") + logger.debug(f"Wrote {picklefile}") -def fill_melt_array_with_interpolations(array=None, datetimes_dict=None, verbose=True): +def fill_melt_array_with_interpolations(array=None, datetimes_dict=None): """Take the mean melt array, fill it with interpolations from the mean climatology. The array and datetimes_dict can be sent as parameters if they've already been read. @@ -40,9 +40,7 @@ def fill_melt_array_with_interpolations(array=None, datetimes_dict=None, verbose """ # Get the dates and if array == None or datetimes_dict == None: - array, datetimes_dict = read_model_array_picklefile( - resample_melt_codes=True, verbose=verbose - ) + array, datetimes_dict = read_model_array_picklefile(resample_melt_codes=True) avg_array, avg_dt_dict = read_daily_melt_averages_picklefile( build_picklefile_if_not_present=True diff --git a/antarctica_today/generate_plots_for_given_day.py b/antarctica_today/generate_plots_for_given_day.py index 164b7bf..752f2d9 100644 --- a/antarctica_today/generate_plots_for_given_day.py +++ b/antarctica_today/generate_plots_for_given_day.py @@ -10,6 +10,7 @@ import dateutil.parser import matplotlib.pyplot +from loguru import logger from antarctica_today import ( generate_antarctica_today_map, @@ -105,7 +106,6 @@ def generate_maps_and_plots_for_a_date( gap_filled=True, dpi=dpi, outfile=lineplot_outfile, - verbose=True, ) # Close the current plots open in matplotlib. (Keeps them from accumulating.) 
@@ -174,7 +174,7 @@ def generate_maps_and_plots_for_a_date(
    )
    if not os.path.exists(dest_dir_location):
        os.mkdir(dest_dir_location)
-        print("Created directory '{0}'.".format(dest_dir_location))
+        logger.info("Created directory '{0}'.".format(dest_dir_location))

    for fn in files_to_move:
        src = fn
@@ -184,7 +184,7 @@

        shutil.copyfile(src, dst)

-        print("{0} -> {1}.".format(src, dst))
+        logger.info("{0} -> {1}.".format(src, dst))


def define_and_parse_args():
diff --git a/antarctica_today/long_term_trends.py b/antarctica_today/long_term_trends.py
index 3cade91..27d3af3 100644
--- a/antarctica_today/long_term_trends.py
+++ b/antarctica_today/long_term_trends.py
@@ -11,6 +11,7 @@
 import numpy
 import statsmodels
 import statsmodels.api
+from loguru import logger
 from osgeo import gdal
 from statsmodels.stats.outliers_influence import summary_table

@@ -151,7 +152,6 @@ def plot_time_series(
    offset_years_by_one=True,
    add_confidence_intervals=True,
    add_prediction_intervals=True,
-    verbose=True,
):
    """Create a plot of the time series of melt.

@@ -265,21 +265,18 @@
    # If go into all this if we've indicated we might want to plot a trendline.
    if include_trendline or include_trendline_only_if_significant:
-        # print(results.params)
-        # print(results.pvalues)
+        # logger.info(results.params)
+        # logger.info(results.pvalues)
        pval_int, pval_slope = results.pvalues
        intercept, slope = results.params

        # fit_func = numpy.poly1d((slope, intercept))

        if print_trendline_summary:
-            print("\n")
-            print(
-                "============",
-                antarctic_regions_dict[region_n] + ",",
-                melt_index_or_extent,
-                "==============",
+            logger.info(
+                f"============ {antarctic_regions_dict[region_n]}, "
+                f"{melt_index_or_extent} =============="
            )
-            print(results.summary())
+            logger.info(results.summary())

        if include_trendline or (
            pval_slope <= 0.05 and include_trendline_only_if_significant
@@ -388,8 +386,7 @@
    else:
        fname = fname_template.format(region_n)
    fig.savefig(fname, dpi=dpi)
-    if verbose:
-        print(fname, "written.")
+    logger.debug(f"Wrote {fname}")

    plt.close(fig)

@@ -450,7 +447,7 @@ def special_plot_antarctica_and_peninsula_index(
    for fmt in (".png", ".svg"):
        figname = os.path.splitext(figname)[0] + fmt
        fig.savefig(figname, dpi=600)
-        print(figname, "written.")
+        logger.info(f"Wrote {figname}")

    return results

@@ -462,7 +459,7 @@ def special_plot_antarctica_and_all_regions(
):
    """Make a special plot for the BAMS report, having all the regions in it."""
    fig, axes = plt.subplots(2, 4, sharex=True, sharey=False, figsize=(12.0, 4.0))
-    print(axes)
+    logger.info(axes)

    for region in range(8):
        ax = axes[int(int(region) / 4), int(region % 4)]
@@ -574,7 +571,7 @@
    for fmt in (".png", ".svg"):
        figname = os.path.splitext(figname)[0] + fmt
        fig.savefig(figname, dpi=600)
-        print(figname, "written.")
+        logger.info(f"Wrote {figname}")

    return

@@ -585,13 +582,12 @@ def compare_ind_year_to_baseline_averages(
    baseline_end=2019,
    gap_filled=True,
    omit_1987=True,
-    verbose=True,
):
    """Print a chart that compares the baseline annual sum melt indices
    (mean, std, min_base, max_base, min_all, max_all, mi_this_year) for each region.
""" - print("All values in km2*days, x 1e3.") - print( + logger.info("All values in km2*days, x 1e3.") + logger.info( "R# | {0:>8s} | {1:>8s} | {2:>8s} | {3:>8s} | {4:>8s} | {5:>8s} | {6:>8s} | {7:>8d}".format( "BL-med", "BL-mean", @@ -627,7 +623,7 @@ def compare_ind_year_to_baseline_averages( assert len(melt_i_this_year) == 1 melt_i_this_year = melt_i_this_year[0] - print( + logger.info( "{0:>2d} | {1:>8.1f} | {2:>8.1f} | {3:>8.1f} | {4:>8.1f} | {5:>8.1f} | {6:>8.1f} | {7:>8.1f} | {8:>8.1f}".format( region, baseline_med * 1e-3, @@ -656,7 +652,6 @@ def compare_ind_year_to_baseline_averages( # offset_years_by_one=True, # include_trendline=False, # dpi=300, - # extent_melt_days_threshold = 1, - # verbose=True) + # extent_melt_days_threshold = 1) # special_plot_antarctica_and_all_regions() compare_ind_year_to_baseline_averages(2021) diff --git a/antarctica_today/melt_array_picklefile.py b/antarctica_today/melt_array_picklefile.py index d5b6018..4843e40 100755 --- a/antarctica_today/melt_array_picklefile.py +++ b/antarctica_today/melt_array_picklefile.py @@ -18,6 +18,7 @@ from typing import Any, Dict, Tuple import numpy +from loguru import logger from osgeo import gdal from antarctica_today.map_filedata import ice_mask_tif @@ -76,37 +77,36 @@ def find_largest_melt_days_in_an_interval( top_dts = datetimes_in_interval[max_interval_index] if top_n: - print(top_melt) - print([dt.astype(datetime.datetime).strftime("%Y-%m-%d") for dt in top_dts]) + logger.info(str(top_melt)) + logger.info( + str([dt.astype(datetime.datetime).strftime("%Y-%m-%d") for dt in top_dts]) + ) else: - print( + logger.info( "{0} km2 in {1}".format( top_melt, top_dts.astype(datetime.datetime).strftime("%Y-%m-%d") ) ) -def get_array_from_model_files(file_dir=model_results_dir, verbose=True): +def get_array_from_model_files(file_dir=model_results_dir, progress=True): """Take the individual .bin arrays for each day, and turn it into a M x N x T shaped numpy array.""" file_list = recurse_directory(file_dir) first_file_data = read_NSIDC_bin_file(file_list[0], return_type=int) - # print(first_file_data.shape) - # print(first_file_data) - # print(numpy.unique(first_file_data)) # Values are -1, 0, 1, 2... look from + # logger.info(first_file_data.shape) + # logger.info(first_file_data) + # logger.info(numpy.unique(first_file_data)) # Values are -1, 0, 1, 2... look from # Tom what each of those values actually means. # 3D array, Y x X x T array_shape = first_file_data.shape + (len(file_list),) data_array = numpy.empty(array_shape, dtype=first_file_data.dtype) - if verbose: - print( - "Retrieving melt data from {0} binary (.bin) files.".format(len(file_list)) - ) + logger.debug(f"Retrieving melt data from {len(file_list)} binary (.bin) files.") for i, fname in enumerate(file_list): - if verbose: + if progress: ProgressBar( i + 1, len(file_list), @@ -134,7 +134,7 @@ def save_model_array_picklefile( pickle.dump((data_array, datetime_dict), f) f.close() - print(picklefile, "written.") + logger.info(f"Wrote {picklefile}") return data_array, datetime_dict @@ -158,7 +158,6 @@ def read_model_array_picklefile( filter_out_error_swaths=True, resample_melt_codes=False, resample_melt_code_threshold=4, - verbose=True, ): """Read the model array picklefile. @@ -171,13 +170,11 @@ def read_model_array_picklefile( and irrelevant to the v3 data. Just keep "resample_melt_codes" to False when running with v3 code. 
""" - if verbose: - print("Reading", os.path.split(picklefile)[-1] + "...", end="") + logger.debug(f"Reading {os.path.split(picklefile)[-1]}...") f = open(picklefile, "rb") model_array, datetime_dict = pickle.load(f) f.close() - if verbose: - print("Done.") + logger.debug("Done.") if fill_pole_hole: # Fill the pole hole (any missing values) with "no melt" (1) @@ -324,11 +321,9 @@ def _filter_out_erroneous_swaths(model_array, datetimes_dict): def read_gap_filled_melt_picklefile( picklefile: Path = gap_filled_melt_picklefile, - verbose: bool = True, ) -> Tuple[numpy.ndarray, Dict[datetime.datetime, int]]: """Read the gap-filled picklefile, return to user.""" - if verbose: - print("Reading", picklefile) + logger.debug(f"Reading {picklefile}") with open(picklefile, "rb") as f: array, dt_dict = pickle.load(f) diff --git a/antarctica_today/nsidc_download_Tb_data.py b/antarctica_today/nsidc_download_Tb_data.py index ef5501b..2ef23e6 100644 --- a/antarctica_today/nsidc_download_Tb_data.py +++ b/antarctica_today/nsidc_download_Tb_data.py @@ -41,6 +41,7 @@ import dateutil.parser import earthaccess +from loguru import logger from antarctica_today.constants.paths import DATA_TB_DIR @@ -72,7 +73,7 @@ def build_version_query_params(version): desired_pad_length = 3 if len(version) > desired_pad_length: - print('Version string too long: "{0}"'.format(version)) + logger.info('Version string too long: "{0}"'.format(version)) quit() version = str(int(version)) # Strip off any leading zeros @@ -142,7 +143,9 @@ def output_progress(count, total, status="", bar_len=60): percents = int(round(100.0 * fraction)) bar = "=" * filled_len + " " * (bar_len - filled_len) fmt = " [{0}] {1:3d}% {2} ".format(bar, percents, status) - print("\b" * (len(fmt) + 4), end="") # clears the line + + # Clear the line + print("\b" * (len(fmt) + 4), end="") # noqa: T201 sys.stdout.write(fmt) sys.stdout.flush() @@ -156,25 +159,23 @@ def cmr_read_in_chunks(file_object, chunk_size=1024 * 1024): yield data -def cmr_download(urls, force=False, quiet=False, output_directory=None): +def cmr_download(urls, force=False, progress=False, output_directory=None): """Download files from list of urls.""" if not urls: return url_count = len(urls) - if not quiet: - print(f"Downloading {url_count} files...") + logger.info(f"Downloading {url_count} files...") files_saved = [] for index, url in enumerate(urls, start=1): filename = url.split("/")[-1] - if not quiet: - print( - "{0}/{1}: {2}".format( - str(index).zfill(len(str(url_count))), url_count, filename - ) + logger.debug( + "{0}/{1}: {2}".format( + str(index).zfill(len(str(url_count))), url_count, filename ) + ) # Put the new file into the output directory where we want it. 
        if output_directory:
@@ -189,8 +190,7 @@
            length = int(response.headers["content-length"])
            try:
                if not force and length == os.path.getsize(filename):
-                    if not quiet:
-                        print(" File exists, skipping")
+                    logger.debug(" File exists, skipping")
                    continue
            except OSError:
                pass
@@ -201,21 +201,21 @@
            with open(filename, "wb") as out_file:
                for data in cmr_read_in_chunks(response, chunk_size=chunk_size):
                    out_file.write(data)
-                    if not quiet:
+                    if progress:
                        count = count + 1
                        time_elapsed = time.time() - time_initial
                        download_speed = get_speed(time_elapsed, count * chunk_size)
                        output_progress(count, max_chunks, status=download_speed)

-            if not quiet:
-                print()
+            logger.debug("")
            files_saved.append(filename)

        except HTTPError as e:
-            print(f"HTTP error {e.code}, {e.reason} ({e.url})".format(e.code, e.reason))
+            logger.error(f"HTTP error {e.code}, {e.reason} ({e.url})")
            raise
        except URLError as e:
-            print(f"URL error: {e.reason} ({e.url})")
+            logger.warning(f"URL error: {e.reason} ({e.url})")
+            # TODO: Why don't we `raise` here?
        except IOError:
            raise
@@ -301,11 +301,11 @@
    if only_in_melt_season:
        results = filter_data_only_in_melt_season(results)

-        print(
+        logger.info(
            f"Found {len(results)} downloadable granules within the Antarctic melt season."
        )
    else:
-        print(f"Found {len(results)} downloadable granules.")
+        logger.info(f"Found {len(results)} downloadable granules.")

    # If there are no granules to download, return an empty list of files without bothering to call "download()."
    if len(results) == 0:
diff --git a/antarctica_today/plot_daily_melt_and_climatology.py b/antarctica_today/plot_daily_melt_and_climatology.py
index 88e8fbb..d8774ce 100755
--- a/antarctica_today/plot_daily_melt_and_climatology.py
+++ b/antarctica_today/plot_daily_melt_and_climatology.py
@@ -12,6 +12,7 @@
 import matplotlib.pyplot as plt
 import numpy
 import pandas
+from loguru import logger

 from antarctica_today.compute_mean_climatology import (
     _get_region_area_km2,
@@ -96,7 +97,7 @@
        plt.show()
    else:
        fig.savefig(fname, dpi=300)
-        print(fname, "saved.")
+        logger.info(f"Saved {fname}")


def _add_plot_legend(ax, loc="upper center", adjust_ylim_range=True):
@@ -215,7 +216,6 @@ def plot_current_year_melt_over_baseline_stats(
    gap_filled=True,
    add_max_line=False,
    dpi=300,
-    verbose=True,
):
    """Read the melt data for the melt year up through the "current_datetime", and plot over the baseline climatology.

@@ -235,13 +235,11 @@
    outfile: Image file to write out.

-    verbose: Verbose output.
- Return ------ None """ - df = read_daily_melt_numbers_as_dataframe(verbose=False, gap_filled=gap_filled) + df = read_daily_melt_numbers_as_dataframe(gap_filled=gap_filled) if current_date is None: current_date = df["date"].iloc[-1] @@ -289,19 +287,19 @@ def plot_current_year_melt_over_baseline_stats( if len(datetimes) == 0: return - # print("=========================================================") - # print(datetime_start, current_date) - # print(datetimes) - # print(melt_pcts) - # print(records_in_range) + # logger.info("=========================================================") + # logger.info(datetime_start, current_date) + # logger.info(datetimes) + # logger.info(melt_pcts) + # logger.info(records_in_range) datetimes, melt_pcts = _add_nans_in_gaps(datetimes, melt_pcts, gap_days_max=4) - # print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++") - # print(datetime_start, current_date) - # print(datetimes) - # print(melt_pcts) - # print(records_in_range) + # logger.info("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++") + # logger.info(datetime_start, current_date) + # logger.info(datetimes) + # logger.info(melt_pcts) + # logger.info(records_in_range) if current_date > datetimes[-1]: current_date = datetimes[-1] @@ -322,7 +320,6 @@ def plot_current_year_melt_over_baseline_stats( gap_filled=gap_filled, add_max_line=add_max_line, dpi=dpi, - verbose=verbose, ) return @@ -331,10 +328,10 @@ def plot_current_year_melt_over_baseline_stats( def _add_nans_in_gaps(datetimes, melt_pcts, gap_days_max=4): """Add nans to areas with large day gaps to make blanks in data series when plotted.""" time_deltas = [(datetimes[i + 1] - datetimes[i]) for i in range(len(datetimes) - 1)] - # print(time_deltas) + # logger.info(time_deltas) gap_indices = numpy.where([td.days > gap_days_max for td in time_deltas])[0] - # print(gap_indices) + # logger.info(gap_indices) last_gap_index = 0 new_datetimes = [] @@ -372,7 +369,6 @@ def _plot_current_year_and_baseline( gap_filled=True, add_max_line=False, dpi=300, - verbose=True, ): """Plot the current year's melt over the top of the baseline climatology. @@ -390,8 +386,6 @@ def _plot_current_year_and_baseline( outfile: Image file to write out. - verbose: Verbose output. - Return ------ None @@ -413,7 +407,6 @@ def _plot_current_year_and_baseline( add_max_line=add_max_line, current_year_percents_for_printout=current_year_percents, gap_filled=gap_filled, - verbose=verbose, ) plot_label = ( @@ -445,8 +438,7 @@ def _plot_current_year_and_baseline( base, ext = os.path.splitext(outfile) outfile = base + "_gap_filled" + ext - if verbose: - print("Plotting", outfile) + logger.debug(f"Plotting {outfile}") if os.path.splitext(outfile)[1].lower() == ".eps": fig.savefig(outfile, dpi=dpi, format="eps") else: @@ -460,15 +452,14 @@ def _get_previous_max_melt_values( fraction_or_area="fraction", region_num=0, gap_filled=True, - verbose=True, ): # For a given set of datetimes in a melt year, return the previous datetime and previous max melt value # ("fraction" or "area") of the previous highest melt in the observational record. # Return two arrays, each of equal length to "current_datetimes": # previous_melt_dts = previous datetime of each max melt. Each datetime should have the same "month, day" value and be from a previous year. # previous_melt_vals = previous maximum melt value ("fraction" or "area") associated with that datetime, in the region specified. 
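The `_add_nans_in_gaps` helper used above relies on matplotlib breaking a plotted line wherever it meets NaN. A behavior-equivalent sketch of the idea (the real function returns the array types the plotting code expects and differs in detail):

```python
import datetime

import numpy


def add_nans_in_gaps(dts, vals, gap_days_max=4):
    """Insert a NaN between samples more than gap_days_max days apart,
    so matplotlib draws a visible gap instead of bridging missing days."""
    out_dts, out_vals = [dts[0]], [vals[0]]
    for prev, cur, val in zip(dts, dts[1:], vals[1:]):
        if (cur - prev).days > gap_days_max:
            out_dts.append(prev + datetime.timedelta(days=1))
            out_vals.append(numpy.nan)  # breaks the plotted line here
        out_dts.append(cur)
        out_vals.append(val)
    return out_dts, out_vals
```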
- df = read_daily_melt_numbers_as_dataframe(gap_filled=gap_filled, verbose=verbose) - # print(df) + df = read_daily_melt_numbers_as_dataframe(gap_filled=gap_filled) + # logger.info(df) # Convert the "YYYY-MM-DD" column into datetime objects. # melt_dates_all = numpy.array([datetime.datetime.strptime(date, "%Y-%d-%m") for date in df['date']]) @@ -522,7 +513,6 @@ def _plot_baseline_climatology( dpi=150, current_year_percents_for_printout=None, gap_filled=True, - verbose=True, ): """Plot the baseline (median, inter-quartile, inter-decile) melt ranges into a matplotlib axis. @@ -554,7 +544,8 @@ def _plot_baseline_climatology( If "return_axes" is False or None, return None. """ md_tuples, p10, p25, p50, p75, p90 = _get_baseline_percentiles_from_csv( - region_number=region_num, gap_filled=gap_filled, verbose=False + region_number=region_num, + gap_filled=gap_filled, ) # Convert to percentages p10 = p10 * 100.0 @@ -645,7 +636,6 @@ def _plot_baseline_climatology( fraction_or_area="fraction", region_num=region_num, gap_filled=gap_filled, - verbose=verbose, ) # Plot the previous max in a thin organge line. @@ -661,25 +651,24 @@ def _plot_baseline_climatology( # label='{0} - {1} Daily Maximum'.format(1979, datetimes[-1].year - 1)) # NOTE: If I want to query what the previous dates were, I can put some logic here to print them to the console. - if verbose: - print("============") - print("Region {0} previous maximums:".format(region_num)) - print( - "-- prev_date -- max_pct -- max_area -- cur_date -- cur_pct -- cur_area --" + logger.debug("============") + logger.debug("Region {0} previous maximums:".format(region_num)) + logger.debug( + "-- prev_date -- max_pct -- max_area -- cur_date -- cur_pct -- cur_area --" + ) + region_area_km2 = _get_region_area_km2(region_num) + for pdt, pm, cdt, cm in zip( + max_dts, max_melt_frac, datetimes, current_year_percents_for_printout + ): + logger.debug( + f"{pandas.Timestamp(pdt).to_pydatetime().strftime('%Y-%m-%d')}" + + " {0:>5.2f} ".format(pm * 100.0) + + " {0:>7.0f} ".format(pm * region_area_km2) + + f"{cdt.strftime('%Y-%m-%d')}" + + " {0:>5.2f} ".format(cm) + + " {0:>7.0f}".format(cm / 100.0 * region_area_km2) + + ("**" if cm > (pm * 100) else "") ) - region_area_km2 = _get_region_area_km2(region_num) - for pdt, pm, cdt, cm in zip( - max_dts, max_melt_frac, datetimes, current_year_percents_for_printout - ): - print( - pandas.Timestamp(pdt).to_pydatetime().strftime("%Y-%m-%d"), - " {0:>5.2f} ".format(pm * 100.0), - " {0:>7.0f} ".format(pm * region_area_km2), - cdt.strftime("%Y-%m-%d"), - " {0:>5.2f} ".format(cm), - " {0:>7.0f}".format(cm / 100.0 * region_area_km2), - "**" if cm > (pm * 100) else "", - ) # Put ticks every month, month names in between. ax.xaxis.set_major_locator(mpl.dates.MonthLocator()) # Tick every month. @@ -712,8 +701,7 @@ def _plot_baseline_climatology( _add_plot_legend(ax, adjust_ylim_range=True) if outfile: - if verbose: - print("Plotting", outfile) + logger.debug(f"Plotting {outfile}") if os.path.splitext(outfile)[1].lower() == ".eps": fig.savefig(outfile, dpi=dpi, format="eps") else: @@ -728,7 +716,9 @@ def _plot_baseline_climatology( def _get_baseline_percentiles_from_csv( - region_number=0, df=None, gap_filled=True, verbose=True + region_number=0, + df=None, + gap_filled=True, ): """Read the Antarctica Today baseline climatologies, return the (month,day) tuples and the 10,25,50,75,90th percentiles. @@ -737,7 +727,6 @@ def _get_baseline_percentiles_from_csv( region_num: 0 thru 7. See tb_file_data.antarctic_regions_dict for details. 
df: Pandas datafram containing the data. If None, open the dataframe and read from it. (Useful to open it only once and pass it along if we will be calling this function repeatedlly.) - verbose: Specifies whether to provide feedback (primarily if opening the CSV file.) Return ------ @@ -750,9 +739,7 @@ def _get_baseline_percentiles_from_csv( - numpy array of 90th percentile values for each day. """ if not df: - df = open_baseline_climatology_csv_as_dataframe( - gap_filled=gap_filled, verbose=verbose - ) + df = open_baseline_climatology_csv_as_dataframe(gap_filled=gap_filled) assert 0 <= region_number < len(antarctic_regions_dict) @@ -785,14 +772,6 @@ def DO_IT_ALL(gap_filled=True): def special_figure_REG5_FEB_APR_2022(outfile): - region_num = 5 - doy_start = (2, 1) - doy_end = (4, 30) - gap_filled = True - add_max_line = True - dpi = 600 - verbose = True - current_date = None """Read the melt data for the melt year up through the "current_datetime", and plot over the baseline climatology. current_datetime should be a date within the melt season (October 1 thru April 30). @@ -811,13 +790,18 @@ def special_figure_REG5_FEB_APR_2022(outfile): outfile: Image file to write out. - verbose: Verbose output. - Return ------ None """ - df = read_daily_melt_numbers_as_dataframe(verbose=False, gap_filled=gap_filled) + region_num = 5 + doy_start = (2, 1) + doy_end = (4, 30) + gap_filled = True + add_max_line = True + dpi = 600 + current_date = None + df = read_daily_melt_numbers_as_dataframe(gap_filled=gap_filled) if current_date is None: current_date = df["date"].iloc[-1] @@ -865,19 +849,19 @@ def special_figure_REG5_FEB_APR_2022(outfile): if len(datetimes) == 0: return - # print("=========================================================") - # print(datetime_start, current_date) - # print(datetimes) - # print(melt_pcts) - # print(records_in_range) + # logger.info("=========================================================") + # logger.info(datetime_start, current_date) + # logger.info(datetimes) + # logger.info(melt_pcts) + # logger.info(records_in_range) datetimes, melt_pcts = _add_nans_in_gaps(datetimes, melt_areas, gap_days_max=4) - # print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++") - # print(datetime_start, current_date) - # print(datetimes) - # print(melt_pcts) - # print(records_in_range) + # logger.info("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++") + # logger.info(datetime_start, current_date) + # logger.info(datetimes) + # logger.info(melt_pcts) + # logger.info(records_in_range) if current_date > datetimes[-1]: current_date = datetimes[-1] @@ -1017,9 +1001,7 @@ def special_figure_REG5_FEB_APR_2022(outfile): - numpy array of 75th percentile values for each day. - numpy array of 90th percentile values for each day. """ - dfb = open_baseline_climatology_csv_as_dataframe( - gap_filled=gap_filled, verbose=verbose - ) + dfb = open_baseline_climatology_csv_as_dataframe(gap_filled=gap_filled) assert 0 <= region_num < len(antarctic_regions_dict) @@ -1142,7 +1124,6 @@ def special_figure_REG5_FEB_APR_2022(outfile): fraction_or_area="area", region_num=region_num, gap_filled=gap_filled, - verbose=verbose, ) # Plot the previous max in a thin organge line. @@ -1159,12 +1140,12 @@ def special_figure_REG5_FEB_APR_2022(outfile): # NOTE: If I want to query what the previous dates were, I can put some logic here to print them to the console. 
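The dataframes consumed in this function come from CSVs that open with a multi-line comment preamble, which is why `read_daily_melt_numbers_as_dataframe` points pandas at the real header row and parses the date column up front. A sketch of that convention; the filename is hypothetical, and the daily-melt file uses `header=18` while the baseline-percentiles file uses `header=19`:

```python
import pandas

df = pandas.read_csv(
    "daily_melt_numbers_gap_filled.csv",      # hypothetical filename
    header=18,                                # skip the comment preamble
    converters={"date": pandas.to_datetime},  # "date" arrives as Timestamps
)
```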
# if verbose: - # print("============") - # print("Region {0} previous maximums:".format(region_num)) - # print("-- prev_date -- max_pct -- max_area -- cur_date -- cur_pct -- cur_area --") + # logger.info("============") + # logger.info("Region {0} previous maximums:".format(region_num)) + # logger.info("-- prev_date -- max_pct -- max_area -- cur_date -- cur_pct -- cur_area --") # region_area_km2 = _get_region_area_km2(region_num) # for pdt, pm, cdt, cm in zip(max_dts, max_melt_frac, datetimes, current_year_percents_for_printout): - # print(pandas.Timestamp(pdt).to_pydatetime().strftime("%Y-%m-%d"), + # logger.info(pandas.Timestamp(pdt).to_pydatetime().strftime("%Y-%m-%d"), # " {0:>5.2f} ".format(pm * 100.), # " {0:>7.0f} ".format(pm * region_area_km2), # cdt.strftime("%Y-%m-%d"), @@ -1202,7 +1183,7 @@ def special_figure_REG5_FEB_APR_2022(outfile): # if outfile: # if verbose: - # print("Plotting", outfile) + # logger.info("Plotting", outfile) # if os.path.splitext(outfile)[1].lower() == ".eps": # fig.savefig(outfile, dpi=dpi, format="eps") # else: @@ -1246,8 +1227,7 @@ def special_figure_REG5_FEB_APR_2022(outfile): base, ext = os.path.splitext(outfile) outfile = base + "_gap_filled" + ext - if verbose: - print("Plotting", outfile) + logger.debug(f"Plotting {outfile}") if os.path.splitext(outfile)[1].lower() == ".eps": fig.savefig(outfile, dpi=dpi, format="eps") else: @@ -1290,7 +1270,6 @@ def special_figure_REG5_FEB_APR_2022(outfile): dpi=1200, add_max_line=False, gap_filled=True, - verbose=True, ) # df = plot_current_year_melt_over_baseline_stats(datetime.datetime(2020,6,30)) diff --git a/antarctica_today/progress_bar.py b/antarctica_today/progress_bar.py index 048e4b3..ffee5f9 100755 --- a/antarctica_today/progress_bar.py +++ b/antarctica_today/progress_bar.py @@ -34,10 +34,10 @@ def ProgressBar( ) filledLength = int((length * iteration) // total) bar = fill * filledLength + "-" * (length - filledLength) - print(f"\r{prefix} |{bar}| {percent}% {suffix}", end=printEnd) + print(f"\r{prefix} |{bar}| {percent}% {suffix}", end=printEnd) # noqa: T201 # Print New Line on Complete if iteration == total: - print() + print() # noqa: T201 # Sample Usage diff --git a/antarctica_today/read_NSIDC_bin_file.py b/antarctica_today/read_NSIDC_bin_file.py index c316bf8..da892fb 100644 --- a/antarctica_today/read_NSIDC_bin_file.py +++ b/antarctica_today/read_NSIDC_bin_file.py @@ -8,6 +8,7 @@ from typing import Tuple, Union import numpy +from loguru import logger from antarctica_today.constants.grid import DEFAULT_GRID_SHAPE from antarctica_today.constants.paths import DATA_TB_DIR @@ -114,8 +115,8 @@ def read_NSIDC_bin_file( multiplier=0.1, ) - print(array1.shape, array1.dtype) - print(array1) + print(array1.shape, array1.dtype) # noqa: T201 + print(array1) # noqa: T201 # An NSIDC-0051 sea-ice concentration v1 file, in a 1-byte unsigned integer array with # a 300-byte header. @@ -130,8 +131,8 @@ def read_NSIDC_bin_file( signed=False, ) - print(array2.shape, array2.dtype) - print(array2) + print(array2.shape, array2.dtype) # noqa: T201 + print(array2) # noqa: T201 # For an Antarctic file array3 = read_NSIDC_bin_file( @@ -143,8 +144,8 @@ def read_NSIDC_bin_file( signed=False, ) - print(array3.shape, array3.dtype) - print(array3) + print(array3.shape, array3.dtype) # noqa: T201 + print(array3) # noqa: T201 # An NSIDC-0079 sea-ice concentration v3 files, in 2-byte unsigned integer array with # a 300-byte header. 
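For context on the `__main__` examples in `read_NSIDC_bin_file.py` above and below: the reader is, at heart, a header-skipping `numpy.fromfile` with optional scaling. A hedged sketch of that idea for the NSIDC-0079 case (300-byte header, 2-byte unsigned integers, 0.1 multiplier); parameter names mirror the calls shown, and little-endian byte order is an assumption, not something the patch states:

```python
import numpy


def read_flat_bin(path, shape, header_size=300, element_size=2,
                  signed=False, multiplier=0.1):
    # Skip the fixed-size header, read (assumed little-endian) integers,
    # reshape to the grid, and scale to physical units.
    dtype = numpy.dtype("<" + ("i" if signed else "u") + str(element_size))
    raw = numpy.fromfile(path, dtype=dtype, offset=header_size)
    return raw.reshape(shape) * multiplier
```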
@@ -159,8 +160,8 @@ def read_NSIDC_bin_file( signed=False, ) - print(array4.shape, array4.dtype) - print(array4) + print(array4.shape, array4.dtype) # noqa: T201 + print(array4) # noqa: T201 # For an Antarctic file, alternately returning the array in floating-point values (your choice, just pick the parameter you want.) array5 = read_NSIDC_bin_file( @@ -173,5 +174,5 @@ def read_NSIDC_bin_file( multiplier=0.1, ) - print(array5.shape, array5.dtype) - print(array5) + print(array5.shape, array5.dtype) # noqa: T201 + print(array5) # noqa: T201 diff --git a/antarctica_today/src_baseline/download_NSIDC_Tb_files.py b/antarctica_today/src_baseline/download_NSIDC_Tb_files.py index 2fecd24..d31d7c0 100644 --- a/antarctica_today/src_baseline/download_NSIDC_Tb_files.py +++ b/antarctica_today/src_baseline/download_NSIDC_Tb_files.py @@ -36,6 +36,8 @@ from getpass import getpass from typing import List +from loguru import logger + try: from urllib.error import HTTPError, URLError from urllib.parse import urlparse @@ -107,7 +109,7 @@ def get_credentials(url): errprefix = "netrc error: " except Exception as e: if not ("No such file" in str(e)): - print("netrc error: {0}".format(str(e))) + logger.error("netrc error: {0}".format(str(e))) username = None password = None @@ -125,7 +127,7 @@ def get_credentials(url): opener = build_opener(HTTPCookieProcessor()) opener.open(req) except HTTPError: - print(errprefix + "Incorrect username or password") + logger.error(errprefix + "Incorrect username or password") errprefix = "" credentials = None username = None @@ -137,7 +139,7 @@ def get_credentials(url): def build_version_query_params(version): desired_pad_length = 3 if len(version) > desired_pad_length: - print('Version string too long: "{0}"'.format(version)) + logger.error('Version string too long: "{0}"'.format(version)) quit() version = str(int(version)) # Strip off any leading zeros @@ -178,7 +180,7 @@ def cmr_download(urls, output_dir=None, credentials=None): return url_count = len(urls) - print("Downloading {0} files...".format(url_count)) + logger.info("Downloading {0} files...".format(url_count)) # credentials = None for index, url in enumerate(urls, start=1): @@ -188,7 +190,7 @@ def cmr_download(urls, output_dir=None, credentials=None): filename = url.split("/")[-1] if output_dir != None: filename = os.path.join(output_dir, filename) - print( + logger.info( "{0}/{1}: {2}".format( str(index).zfill(len(str(url_count))), url_count, filename ) @@ -205,9 +207,9 @@ def cmr_download(urls, output_dir=None, credentials=None): data = opener.open(req).read() open(filename, "wb").write(data) except HTTPError as e: - print("HTTP error {0}, {1}".format(e.code, e.reason)) + logger.info("HTTP error {0}, {1}".format(e.code, e.reason)) except URLError as e: - print("URL error: {0}".format(e.reason)) + logger.info("URL error: {0}".format(e.reason)) except IOError: raise except KeyboardInterrupt: @@ -272,7 +274,7 @@ def cmr_search( polygon=polygon, filename_filter=filename_filter, ) - print("Querying for data:\n\t{0}\n".format(cmr_query_url)) + logger.info("Querying for data:\n\t{0}\n".format(cmr_query_url)) cmr_scroll_id = None ctx = ssl.create_default_context() @@ -292,21 +294,21 @@ def cmr_search( cmr_scroll_id = headers["cmr-scroll-id"] hits = int(headers["cmr-hits"]) if hits > 0: - print("Found {0} matches.".format(hits)) + logger.info("Found {0} matches.".format(hits)) else: - print("Found no matches.") + logger.info("Found no matches.") search_page = response.read() search_page = 
json.loads(search_page.decode("utf-8")) url_scroll_results = cmr_filter_urls(search_page) if not url_scroll_results: break if hits > CMR_PAGE_SIZE: - print(".", end="") + print(".", end="") # noqa: T201 sys.stdout.flush() urls += url_scroll_results if hits > CMR_PAGE_SIZE: - print() + print() # noqa: T201 return urls except KeyboardInterrupt: quit() diff --git a/antarctica_today/src_baseline/extrapolate_thermap_readings_off_map_edge.py b/antarctica_today/src_baseline/extrapolate_thermap_readings_off_map_edge.py index e4e5b6b..80d1bf4 100644 --- a/antarctica_today/src_baseline/extrapolate_thermap_readings_off_map_edge.py +++ b/antarctica_today/src_baseline/extrapolate_thermap_readings_off_map_edge.py @@ -14,6 +14,7 @@ import os import numpy +from loguru import logger from osgeo import gdal ice_mask_tif = "F:/Research/DATA/Antarctica_Today/baseline_datasets/ice_mask.tif" @@ -38,7 +39,7 @@ out_ndv = 0.0 out_array[out_array == tm_ndv] = out_ndv -print(numpy.where(numpy.logical_and((im_array == 1), (tm_array == tm_ndv)))) +logger.info(numpy.where(numpy.logical_and((im_array == 1), (tm_array == tm_ndv)))) # These are the hand-selected pixel values, eight lines total. # Five going vertically along Queen Maud Land, extrapolating 1-2 pixels @@ -85,9 +86,9 @@ def f(x, a, b, c): p = numpy.polyfit(known_x, tm_array[known_i, known_j], 2) extrapolated_values = f(numpy.array(interp_x), *p) - print("\n", known_i, interp_i, known_j, interp_j) - print(tm_array[known_i, known_j], extrapolated_values) - print(known_x, interp_x) + logger.info(f"\n {known_i} {interp_i} {known_j} {interp_j}") + logger.info(f"{tm_array[known_i, known_j]} {extrapolated_values}") + logger.info(f"{known_x} {interp_x}") # Fill in missing values with extrapolated values out_array[interp_i, interp_j] = extrapolated_values @@ -119,4 +120,4 @@ def f(x, a, b, c): ds_out.FlushCache() band_out = None ds_out = None -print("\n", thermap_tif_out, "written.") +logger.info(f"\n {thermap_tif_out} written.") diff --git a/antarctica_today/src_baseline/plot_thermap_lapse_rate.py b/antarctica_today/src_baseline/plot_thermap_lapse_rate.py index 5b09ee9..1bde444 100644 --- a/antarctica_today/src_baseline/plot_thermap_lapse_rate.py +++ b/antarctica_today/src_baseline/plot_thermap_lapse_rate.py @@ -3,6 +3,7 @@ import numpy import pandas as pd import statsmodels.api as sm +from loguru import logger from matplotlib import pyplot as plt from matplotlib.axes import Axes from matplotlib.figure import Figure @@ -30,7 +31,7 @@ X = sm.add_constant(elevs) model_elev_only = sm.OLS(thermap_df[["Temp"]], X).fit() -print(model_elev_only.summary()) +logger.info(model_elev_only.summary()) coefs = model_elev_only.params @@ -75,11 +76,11 @@ def plus_minus_op(x): X = thermap_df[["REMA_or_Thermap_Elev", "Lat(S)"]] Y = thermap_df[["Temp"]] -print("\n=== Statsmodels ===") +logger.info("=== Statsmodels ===") X = sm.add_constant(X) model = sm.OLS(Y, X).fit() -print(model.summary()) +logger.info(model.summary()) coefs = model.params temps_lat_corrected_75 = temps - coefs["Lat(S)"] * (75 + lats) @@ -87,7 +88,7 @@ def plus_minus_op(x): # # Compute a quadratic curve through this line. 
 # poly_coefs = numpy.polyfit(elevs, temps_lat_corrected_75, deg=2)
-# print(poly_coefs)
+# logger.info(poly_coefs)
 # # Quadratic trend-line
 # trend_x = numpy.linspace(*min_max_elev, 100)
 # trend_y = poly_coefs[0]*(trend_x**2) + poly_coefs[1]*trend_x + poly_coefs[2]
diff --git a/antarctica_today/src_baseline/resize_25m_grid_by_1000.py b/antarctica_today/src_baseline/resize_25m_grid_by_1000.py
index d9d7e73..fb6b894 100644
--- a/antarctica_today/src_baseline/resize_25m_grid_by_1000.py
+++ b/antarctica_today/src_baseline/resize_25m_grid_by_1000.py
@@ -9,6 +9,7 @@
 
 import os
 
+from loguru import logger
 from osgeo import gdal
 
 infile = "C:/Users/mmacferrin/Dropbox/Research/Antarctica_Today/Dan Dixon/derived/polar_grid_10m_temps_25m_OFF_BY_1000.tif"
@@ -45,4 +46,4 @@
 ds_out.FlushCache()
 band_out = None
 ds_out = None
-print(outfile, "written.")
+logger.info(f"Wrote {outfile}")
diff --git a/antarctica_today/src_baseline/resize_grid_to_reference.py b/antarctica_today/src_baseline/resize_grid_to_reference.py
index 0f7fee1..5ca1063 100644
--- a/antarctica_today/src_baseline/resize_grid_to_reference.py
+++ b/antarctica_today/src_baseline/resize_grid_to_reference.py
@@ -9,24 +9,23 @@
 
 import gdal
 import numpy
+from loguru import logger
 
 
-def resize_tif_to_reference_grid(gtif_in, gtif_reference, gtif_out, verbose=False):
+def resize_tif_to_reference_grid(gtif_in, gtif_reference, gtif_out):
     """I have RACMO & REMA files written out the same grid format & resolution as the
     NSIDC's nsidc-0001 and nsidc-0080 files. But the grid sizes are different with
     different boundaries.
 
     This takes a .tif GeoTiff, and a reference Tb GeoTiff, and creates a copy of the
    gtif_in data with the same array size as the gtif_Tb_reference, and spits it out
    to gtif_out. Extra values are filled in with the gtif_in NoDataValue.
    """
-    if verbose:
-        print("Reading", os.path.split(gtif_in)[1])
+    logger.debug(f"Reading {os.path.split(gtif_in)[1]}")
 
     ds_in = gdal.Open(gtif_in, gdal.GA_ReadOnly)
     if ds_in is None:
         raise FileNotFoundError("Gdal could not read input file '{0}'".format(gtif_in))
 
-    if verbose:
-        print("Reading", os.path.split(gtif_reference)[1])
+    logger.debug(f"Reading {os.path.split(gtif_reference)[1]}")
 
     ds_ref = gdal.Open(gtif_reference, gdal.GA_ReadOnly)
     if ds_ref is None:
@@ -66,7 +65,9 @@ def resize_tif_to_reference_grid(gtif_in, gtif_reference, gtif_out, verbose=Fals
         (x_UL_in % x_res_in) == (x_UL_ref % x_res_ref)
         and (y_UL_in % y_res_in) == (y_UL_ref % y_res_ref)
     ):
-        print(
+        msg = "Input grids are not geographically aligned."
+        logger.error(msg)
+        logger.error(
             "X: {0} % {1} = {2}, {3} % {4} = {5}".format(
                 x_UL_in,
                 x_res_in,
@@ -76,7 +77,7 @@ def resize_tif_to_reference_grid(gtif_in, gtif_reference, gtif_out, verbose=Fals
                 x_UL_in % x_res_in,
                 x_UL_ref,
                 x_res_ref,
                 x_UL_ref % x_res_ref,
             )
         )
-        print(
+        logger.error(
             "Y: {0} % {1} = {2}, {3} % {4} = {5}".format(
                 y_UL_in,
                 y_res_in,
@@ -86,7 +87,7 @@ def resize_tif_to_reference_grid(gtif_in, gtif_reference, gtif_out, verbose=Fals
                 y_UL_in % y_res_in,
                 y_UL_ref,
                 y_res_ref,
                 y_UL_ref % y_res_ref,
             )
         )
-        raise ValueError("Input grids are not geographically aligned.")
+        raise ValueError(msg)
 
     # Create the output array, same shape as the reference array, but same datatype
     # as the source array. Fill with the array_in NDV
@@ -147,8 +148,7 @@ def resize_tif_to_reference_grid(gtif_in, gtif_reference, gtif_out, verbose=Fals
     ds_out.FlushCache()
     ds_out = None
 
-    if verbose:
-        print(os.path.split(gtif_out)[-1], "written.")
+    logger.debug(f"Wrote {os.path.split(gtif_out)[-1]}")
 
     return
@@ -160,13 +160,6 @@ def read_and_parse_args():
     parser.add_argument("input_gtif", type=str, help="Source file (.tif)")
     parser.add_argument("reference_gtif", type=str, help="Reference file (.tif)")
     parser.add_argument("output_gtif", type=str, help="Destination file (.tif)")
-    parser.add_argument(
-        "--verbose",
-        "-v",
-        action="store_true",
-        default=False,
-        help="Increase output verbosity.",
-    )
 
     return parser.parse_args()
@@ -175,5 +168,7 @@
     args = read_and_parse_args()
 
     resize_tif_to_reference_grid(
-        args.input_gtif, args.reference_gtif, args.output_gtif, verbose=args.verbose
+        args.input_gtif,
+        args.reference_gtif,
+        args.output_gtif,
     )
diff --git a/antarctica_today/src_baseline/sample_REMA_elevations.py b/antarctica_today/src_baseline/sample_REMA_elevations.py
index e1d4130..d7bcd41 100644
--- a/antarctica_today/src_baseline/sample_REMA_elevations.py
+++ b/antarctica_today/src_baseline/sample_REMA_elevations.py
@@ -4,6 +4,7 @@
 
 import numpy
 import pandas as pd
+from loguru import logger
 from matplotlib import pyplot as plt
 
 EGM96 = True
@@ -19,7 +20,7 @@
 thermap_df["REMA_Elev"] = [0] * len(thermap_df)
 thermap_df["REMA_or_Thermap_Elev"] = [0] * len(thermap_df)
 
-print(thermap_df.columns)
+logger.info(thermap_df.columns)
 
 elevs = thermap_df["Elev"]
 lons = thermap_df["Lon(W)"]
@@ -40,11 +41,10 @@ def get_rema_elev(row):
         )
     )
     try:
-        print(
+        logger.info(
             "{0:>30s} {1:0.2f} {2:0.2f}, {4:0.2f}*C, {3:0.1f} -> ".format(
                 row["Name"], row["Lat(S)"], row["Lon(W)"], row["Elev"], row["Temp"]
-            ),
-            end="",
+            )
         )
         elev_value = float(return_line.read())
         row["REMA_Elev"] = float(elev_value)
@@ -61,12 +61,12 @@ def get_rema_elev(row):
     rema_elevs[idx] = row["REMA_Elev"]  # type: ignore [call-overload]
     rema_or_thermap_elevs[idx] = row["REMA_or_Thermap_Elev"]  # type: ignore [call-overload]
 
-    print("{0:0.1f}".format(row["REMA_Elev"]))
+    logger.info("-> {0:0.1f}".format(row["REMA_Elev"]))
 
 thermap_df["REMA_Elev"] = rema_elevs
 thermap_df["REMA_or_Thermap_Elev"] = rema_or_thermap_elevs
 
-print("Done")
+logger.info("Done")
 
 fig, ax = plt.subplots(1, 1, figsize=(4, 4))
 ax.set_aspect("equal")
@@ -90,8 +90,8 @@ def get_rema_elev(row):
 )
 
 fig.savefig(fig_outfile, dpi=120)
-print(os.path.split(fig_outfile)[1], "saved.")
+logger.info(f"Saved {os.path.split(fig_outfile)[1]}")
 
 thermap_df.fillna("", inplace=True)
 thermap_df.to_csv(output_csv, index=False, header=True)
-print(os.path.split(output_csv)[1], "saved.")
+logger.info(f"Saved {os.path.split(output_csv)[1]}")
diff --git a/antarctica_today/svgclip.py b/antarctica_today/svgclip.py
index 5d43451..f2605c6 100755
--- a/antarctica_today/svgclip.py
+++ b/antarctica_today/svgclip.py
@@ -32,6 +32,7 @@
 
 import cairo
 import gi
+from loguru import logger
 
 gi.require_version("Rsvg", "2.0")
 gi.require_foreign("cairo")
@@ -58,7 +59,7 @@ def get_bounding_box(svgfile):
 
 def print_info(svgfile):
     bbox = get_bounding_box(svgfile)
-    print(
+    logger.info(
         """
     X: %f
    Y: %f
diff --git a/antarctica_today/update_data.py b/antarctica_today/update_data.py
index c478ff8..3bc3126 100644
--- a/antarctica_today/update_data.py
+++ b/antarctica_today/update_data.py
@@ -13,6 +13,7 @@
 import dateutil.parser
 import matplotlib.pyplot
 import numpy
+from loguru import logger
 
 from antarctica_today import (
     generate_daily_melt_file,
@@ -48,7 +49,7 @@ def get_list_of_NSIDC_bin_files_to_import(
         if os.path.splitext(f)[1].lower() == target_extension.strip().lower()
     ]
 
-    # print(len(file_list_all), "total files.")
+    # logger.info(f"{len(file_list_all)} total files.")
 
     # Filter out only the files we want.
     hemisphere_lower = hemisphere.lower()
@@ -63,7 +64,7 @@ def get_list_of_NSIDC_bin_files_to_import(
         + ".bin$"
     )
 
-    # print(search_template)
+    # logger.info(search_template)
     # Create a compiled regular-expression search object
     pattern = re.compile(search_template)
     # Keep only the file names that match the search pattern.
@@ -182,7 +183,7 @@ def update_everything_to_latest_date(
                 fnames_this_date[0].suffix.lower() == ".nc"
             ):
                 # Read in netCDF file here.
-                # print("Generating melt file {0} from {1}.".format(os.path.basename(melt_bin_fname),
+                # logger.info("Generating melt file {0} from {1}.".format(os.path.basename(melt_bin_fname),
                 #                                                    os.path.basename(fnames_this_date[0])
                 #                                                    )
                 #       )
@@ -261,11 +262,11 @@ def update_everything_to_latest_date(
         new_daily_melt_arrays = []
         new_daily_dts = []
 
-        # print("dt_today:", dt_today)
-        # print("latest_dt_in_array:", latest_dt_in_array)
-        # print(range(1, ((dt_today - latest_dt_in_array).days + 1)))
-        # print(melt_bin_files[-1])
-        # print(melt_bin_paths[-1])
+        # logger.info(f"dt_today: {dt_today}")
+        # logger.info(f"latest_dt_in_array: {latest_dt_in_array}")
+        # logger.info(range(1, ((dt_today - latest_dt_in_array).days + 1)))
+        # logger.info(melt_bin_files[-1])
+        # logger.info(melt_bin_paths[-1])
 
         # For each day, find the .bin file for that day (if it exists) and append it to the list.
         for day_delta in range(1, ((dt_today - latest_dt_in_array).days + 1)):
@@ -288,7 +289,7 @@ def update_everything_to_latest_date(
             daily_melt_array = read_NSIDC_bin_file(
                 melt_filepath, element_size=2, return_type=int, signed=True, multiplier=1
             )
-            print(melt_filename, "read.")
+            logger.info(f"Read {melt_filename}")
 
             # Add a 3rd (time) dimension to each array to allow concatenating.
             daily_melt_array.shape = list(daily_melt_array.shape) + [1]
@@ -313,7 +314,7 @@ def update_everything_to_latest_date(
             pickle.dump((melt_array_updated, dt_dict), f)
             f.close()
 
-            print(tb_file_data.model_results_picklefile, "written.")
+            logger.info(f"Wrote {tb_file_data.model_results_picklefile}")
         else:
             melt_array_updated = previous_melt_array
 
@@ -375,7 +376,6 @@ def update_everything_to_latest_date(
                 region_num=region_num,
                 gap_filled=True,
                 outfile=line_plot_outfile,
-                verbose=True,
             )
 
             # Clear the figures just made above in order to not get too many open at once.
@@ -406,7 +406,6 @@ def copy_latest_date_plots_to_date_directory(
     anomaly_maps_dir=map_filedata.anomaly_maps_directory,
     line_plots_dir=tb_file_data.climatology_plots_directory,
     use_symlinks=True,
-    verbose=True,
 ):
     """After running the 'update_everything_to_latest_date()' function, use this
     to gather all the latest-date plots into one location. Put it in a
     sub-directory of the daily_plots_gathered_dir
@@ -453,8 +452,7 @@ def copy_latest_date_plots_to_date_directory(
     dest_dir_location = os.path.join(dest_parent_dir, date_string)
     if not os.path.exists(dest_dir_location):
         os.mkdir(dest_dir_location)
-        if verbose:
-            print("Created directory '{0}'.".format(dest_dir_location))
+        logger.debug("Created directory '{0}'.".format(dest_dir_location))
 
     for fn in files_to_move:
         src = fn
@@ -467,8 +465,7 @@ def copy_latest_date_plots_to_date_directory(
         else:
             shutil.copyfile(src, dst)
 
-        if verbose:
-            print("{0} -> {1}.".format(src, dst))
+        logger.debug("{0} -> {1}.".format(src, dst))
 
 
 if __name__ == "__main__":
diff --git a/antarctica_today/write_NSIDC_bin_to_gtif.py b/antarctica_today/write_NSIDC_bin_to_gtif.py
index 5126c40..4002e6a 100644
--- a/antarctica_today/write_NSIDC_bin_to_gtif.py
+++ b/antarctica_today/write_NSIDC_bin_to_gtif.py
@@ -10,6 +10,7 @@
 from typing import Type
 
 import numpy
+from loguru import logger
 from osgeo import gdal, osr
 
 from antarctica_today.read_NSIDC_bin_file import read_NSIDC_bin_file
@@ -112,7 +113,6 @@ def output_bin_to_gtif(
     header_size=0,
     resolution=None,
     hemisphere=None,
-    verbose=True,
     nodata=0,
     signed=False,
     multiplier="auto",
@@ -139,9 +139,6 @@ def output_bin_to_gtif(
     hemisphere = "N" or "S"
                  If None, the hemisphere is derived from the nsidc-0001 filename.
 
-    verbose = Verbosity of the output. False will run this silently. True will
-              produce feedback to stdout. (default True)
-
     nodata = Nodata value to put in the geotiff. Defaults to 0.0
 
     return_type = The data type of the geotiff raster band. Defaults to float.
@@ -199,7 +196,6 @@ def output_bin_to_gtif(
         resolution=resolution,
         hemisphere=hemisphere,
         nodata=nodata,
-        verbose=verbose,
     )
 
     return
@@ -219,7 +215,11 @@ def get_nsidc_geotransform(hemisphere, resolution):
 
 
 def output_gtif(
-    array, gtif_file, resolution=25, hemisphere="S", nodata=0, verbose=True
+    array,
+    gtif_file,
+    resolution=25,
+    hemisphere="S",
+    nodata=0,
 ):
     """Take an array, output to a geotiff in the NSIDC resolution specified.
 
@@ -288,8 +288,7 @@ def output_gtif(
     ds.FlushCache()
     ds = None
 
-    if verbose:
-        print(gtif_file, "written.")
+    logger.debug(f"Wrote {gtif_file}")
 
     return
 
@@ -360,13 +359,6 @@ def read_and_parse_args():
         default=False,
         help="Read bin as signed data. Default to unsigned.",
     )
-    parser.add_argument(
-        "--verbose",
-        "-v",
-        action="store_true",
-        default=False,
-        help="Increase output verbosity.",
-    )
 
     return parser.parse_args()
@@ -433,5 +425,4 @@
         nodata=int(args.nodata),
         return_type=out_type,
         multiplier=multiplier,
-        verbose=args.verbose,
     )
diff --git a/antarctica_today/write_flat_binary.py b/antarctica_today/write_flat_binary.py
index 546713a..b58b711 100644
--- a/antarctica_today/write_flat_binary.py
+++ b/antarctica_today/write_flat_binary.py
@@ -11,6 +11,7 @@
 import os
 
 import numpy
+from loguru import logger
 from osgeo import gdal
 
 
@@ -21,7 +22,6 @@ def write_array_to_binary(
     multiplier=1,
     byteorder="little",
     signed=False,
-    verbose=True,
 ):
     if int(numbytes) not in (1, 2, 4, 8):
         raise ValueError("Numbytes must be one of 1,2,4,8.")
@@ -63,8 +63,7 @@ def write_array_to_binary(
 
     f.close()
 
-    if verbose:
-        print(os.path.split(bin_filename)[-1], "written.")
+    logger.debug(f"Wrote {os.path.split(bin_filename)[-1]}")
 
     return bin_filename
 
@@ -77,14 +76,12 @@ def write_gtif_to_binary(
     multiplier=1,
     byteorder="little",
     signed=False,
-    verbose=True,
 ):
     rasterband = int(rasterband)
     if rasterband < 1:
         raise ValueError("Raster band must be an integer greater than or equal to 1.")
 
-    if verbose:
-        print("Reading", os.path.split(gtif_filename)[-1])
+    logger.debug(f"Reading {os.path.split(gtif_filename)[-1]}")
 
     ds = gdal.Open(gtif_filename, gdal.GA_ReadOnly)
     if ds is None:
@@ -110,7 +107,6 @@ def write_gtif_to_binary(
         multiplier=multiplier,
         byteorder=byteorder,
         signed=signed,
-        verbose=verbose,
     )
 
 
@@ -160,13 +156,6 @@ def read_and_parse_args():
         default=False,
         help="Signed data. Defaults to unsigned. Results will be the same if no negative values are in the array.",
     )
-    parser.add_argument(
-        "--verbose",
-        "-v",
-        action="store_true",
-        default=False,
-        help="Increase output verbosity.",
-    )
 
     return parser.parse_args()
@@ -180,5 +169,4 @@
         numbytes=args.numbytes,
         multiplier=args.multiplier,
         byteorder=args.byteorder,
-        verbose=args.verbose,
     )
diff --git a/conda-lock.yml b/conda-lock.yml
index 715083a..4383a27 100644
--- a/conda-lock.yml
+++ b/conda-lock.yml
@@ -13,7 +13,7 @@ version: 1
 metadata:
   content_hash:
-    linux-64: f3266edc27a5d2b17f9d24a563487e778a5aeccd2d192a3a31f5525b50817296
+    linux-64: 5a8ecb85578447b396175b83bbbb9299310098579a3245564715c5d96ec659ba
   channels:
   - url: conda-forge
     used_env_vars: []
@@ -1754,10 +1754,10 @@ package:
   platform: linux-64
   dependencies:
     libopenblas: '>=0.3.27,<1.0a0'
-  url: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-22_linux64_openblas.conda
+  url: https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-23_linux64_openblas.conda
   hash:
-    md5: 1a2a0cd3153464fee6646f3dd6dad9b8
-    sha256: 082b8ac20d43a7bbcdc28b3b1cd40e4df3a8b5daf0a2d23d68953a44d2d12c1b
+    md5: 96c8450a40aa2b9733073a9460de972c
+    sha256: edb1cee5da3ac4936940052dcab6969673ba3874564f90f5110f8c11eed789c2
   category: main
   optional: false
 - name: libbrotlicommon
@@ -1804,10 +1804,10 @@ package:
   platform: linux-64
   dependencies:
     libblas: 3.9.0
-  url: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-22_linux64_openblas.conda
+  url: https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-23_linux64_openblas.conda
   hash:
-    md5: 4b31699e0ec5de64d5896e580389c9a1
-    sha256: da1b2faa017663c8f5555c1c5518e96ac4cd8e0be2a673c1c9e2cb8507c8fe46
+    md5: eede29b40efa878cbe5bdcb767e97310
+    sha256: 3e7a3236e7e03e308e1667d91d0aa70edd0cba96b4b5563ef4adde088e0881a5
   category: main
   optional: false
 - name: libcurl
@@ -2082,10 +2082,10 @@ package:
   platform: linux-64
   dependencies:
     libblas: 3.9.0
-  url: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-22_linux64_openblas.conda
+  url: https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-23_linux64_openblas.conda
   hash:
-    md5: b083767b6c877e24ee597d93b87ab838
-    sha256: db246341d42f9100d45adeb1a7ba8b1ef5b51ceb9056fd643e98046a3259fde6
+    md5: 2af0879961951987e464722fd00ec1e0
+    sha256: 25c7aef86c8a1d9db0e8ee61aa7462ba3b46b482027a65d66eb83e3e6f949043
   category: main
   optional: false
 - name: libnetcdf
@@ -2375,6 +2375,19 @@ package:
     sha256: adf6096f98b537a11ae3729eaa642b0811478f0ea0402ca67b5108fe2cb0010d
   category: main
   optional: false
+- name: loguru
+  version: 0.7.2
+  manager: conda
+  platform: linux-64
+  dependencies:
+    python: '>=3.11,<3.12.0a0'
+    python_abi: 3.11.*
+  url: https://conda.anaconda.org/conda-forge/linux-64/loguru-0.7.2-py311h38be061_1.conda
+  hash:
+    md5: 94a4521bd7933a66d76b0274dbf8d2dd
+    sha256: 8b4e50db81def33fbb819ebaa2b3b50b92fa8b877b174a4e8d89e2e88a89750e
+  category: main
+  optional: false
 - name: lz4-c
   version: 1.9.4
   manager: conda
diff --git a/environment.yml b/environment.yml
index 9cc0619..c529f8a 100644
--- a/environment.yml
+++ b/environment.yml
@@ -9,6 +9,7 @@ dependencies:
   # Runtime dependencies
   # --------------------
   - click ~=8.1
   - earthaccess ~=0.10.0
   - gdal ~=3.5
+  - loguru ~=0.7.2
   - pandas ~=1.4
diff --git a/pyproject.toml b/pyproject.toml
index 67f7132..466459b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,6 +21,7 @@ select = [
     # "B",  # flake8-bugbear
     # "A",  # flake8-builtins
     "T10",  # flake8-debugger
+    "T20",  # flake8-print
     # "RUF",  # ruff-specific rules
 ]
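
Note for downstream users (not part of the patch itself): with the verbose= keyword
arguments removed, chattiness is now controlled through loguru's sink configuration
rather than per-function flags. A minimal sketch, relying only on loguru's documented
API (the default handler writes DEBUG and above to stderr):

    import sys

    from loguru import logger

    # Drop the default DEBUG-level stderr sink and replace it with an
    # INFO-level one -- roughly equivalent to the old verbose=False behavior.
    logger.remove()
    logger.add(sys.stderr, level="INFO")

    # Or mute the package's logging entirely; logger.enable("antarctica_today")
    # turns it back on.
    logger.disable("antarctica_today")

Because the library logs through the module-level loguru logger, this configuration
can live entirely in the calling script; none of the functions changed above need to
be touched to adjust verbosity.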