chore: logging 3 - remove prints from tools directory #177

Merged 16 commits on Mar 21, 2024
6 changes: 5 additions & 1 deletion src/icesat2waves/tools/beam_stats.py
@@ -1,3 +1,5 @@
+ import logging

import numpy as np
import pandas as pd
import icesat2waves.tools.spectral_estimates as spec
@@ -7,6 +9,8 @@
import matplotlib.gridspec as gridspec
import h5py

+ _logger = logging.getLogger(__name__)


def derive_beam_statistics(Gd, all_beams, Lmeter=10e3, dx=10):
"""
@@ -24,7 +28,7 @@ def derive_beam_statistics(Gd, all_beams, Lmeter=10e3, dx=10):
elif isinstance(Gd, h5py.File):
Gi = io_local.get_beam_hdf_store(Gd[k])
else:
print("Gd is neither dict nor hdf5 file")
_logger.debug("Gd is neither dict nor hdf5 file")
break

dd = Gi["h_mean"]
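Every file in this PR follows the same migration pattern: add a module-level logger named after the module, then swap print() calls for _logger.debug(). A minimal sketch of the idiom as used here (the function and message are hypothetical, not from the PR):

```python
import logging

# Logger named after the module's import path, e.g.
# "icesat2waves.tools.beam_stats"; handlers and levels can then be
# configured per-package from the application entry point.
_logger = logging.getLogger(__name__)


def summarize(values):
    # Formerly print(...); now emitted only when DEBUG is enabled.
    _logger.debug("summarizing %s values", len(values))
    return sum(values) / len(values)
```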
53 changes: 27 additions & 26 deletions src/icesat2waves/tools/generalized_FT.py
@@ -1,4 +1,5 @@
import copy
+ import logging
import time

from numpy import linalg
@@ -13,6 +14,8 @@
from icesat2waves.tools import lanczos, spectral_estimates as spec
import icesat2waves.local_modules.jonswap_gamma as spectal_models

+ _logger = logging.getLogger(__name__)


def rebin(data, dk):
"""
@@ -256,7 +259,7 @@ def calc_gFT_apply(stancil, prior):
if (
x.size / Lpoints < 0.40
): # if there are not enough photos set results to nan
print(" -- data density to low, skip stancil")
_logger.debug(" -- data density to low, skip stancil")
return {
"stancil_center": stancil[1],
"p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]),
@@ -288,7 +291,7 @@ def calc_gFT_apply(stancil, prior):
# define error
err = ERR[x_mask] if ERR is not None else 1

print("compute time weights : ", time.perf_counter() - ta)
_logger.debug("compute time weights : %s", time.perf_counter() - ta)

ta = time.perf_counter()
FT.define_problem(weight, err)
@@ -297,17 +300,15 @@ def calc_gFT_apply(stancil, prior):
p_hat = FT.solve()

if np.isnan(np.mean(p_hat)):
print(" -- inversion nan!")
print(" -- data fraction", x.size / Lpoints)
print(
" -- weights:",
_logger.debug(" -- inversion nan!")
_logger.debug(" -- data fraction %s", x.size / Lpoints)
_logger.debug(
" -- weights: %s err: %s y: %s",
np.mean(weight),
"err:",
np.mean(err),
"y:",
np.mean(y),
)
print(" -- skip stancil")
_logger.debug(" -- skip stancil")
return {
"stancil_center": stancil[1],
"p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]),
@@ -320,7 +321,7 @@ def calc_gFT_apply(stancil, prior):
"spec_adjust": np.nan,
}

print("compute time solve : ", time.perf_counter() - ta)
_logger.debug("compute time solve : %s", time.perf_counter() - ta)
ta = time.perf_counter()

x_pos = (np.round((x - stancil[0]) / self.dx, 0)).astype("int")
@@ -337,7 +338,7 @@ def calc_gFT_apply(stancil, prior):
for k, I in prior_pars.items():
inverse_stats[k] = I.value if hasattr(I, "value") else np.nan

print("compute time stats : ", time.perf_counter() - ta)
_logger.debug("compute time stats : %s", time.perf_counter() - ta)

# multiply with the standard deviation of the data to get dimensions right
PSD = power_from_model(p_hat, dk, self.k.size, x.size, Lpoints)
@@ -366,7 +367,7 @@ def calc_gFT_apply(stancil, prior):
plt.legend()
plt.show()

print("---------------------------------")
_logger.debug("---------------------------------")

# return dict with all relevant data
return_dict = {
@@ -397,18 +398,18 @@ def calc_gFT_apply(stancil, prior):
N_stencil = len(self.stancil_iter_list.T)
Ni = 1
for ss in copy.copy(self.stancil_iter):
- print(Ni, "/", N_stencil, "Stancils")
+ _logger.debug("%s / %s Stancils", Ni, N_stencil)
# prior step
if prior[0] is False: # make NL fit of piors do not exist
print("1st step: with NL-fit")
_logger.debug("1st step: with NL-fit")
I_return = calc_gFT_apply(ss, prior=prior)
prior = I_return["PSD"], I_return["weight"]
# 2nd step
if prior[0] is False:
print("1st GFD failed (priors[0]=false), skip 2nd step")
_logger.debug("1st GFD failed (priors[0]=false), skip 2nd step")
else:
print("2nd step: use set priors:", type(prior[0]), type(prior[1]))
print(prior[0][0:3], prior[1][0:3])
_logger.debug("2nd step: use set priors of types: %s %s", type(prior[0]), type(prior[1]))
_logger.debug("first three elements of priors: %s %s", prior[0][0:3], prior[1][0:3])
I_return = calc_gFT_apply(ss, prior=prior)
prior = I_return["PSD"], I_return["weight"]

@@ -471,7 +472,7 @@ def calc_gFT_apply(stancil, prior):
N_per_stancil.append(I["x_size"])
Spec_adjust_per_stancil.append(spec_adjust)

print("# of x-coordinates" + str(len(Spec_returns)))
_logger.debug("# of x-coordinates %s", len(Spec_returns))

self.N_per_stancil = N_per_stancil
chunk_positions = np.array(list(D_specs.keys()))
@@ -578,10 +579,10 @@ def calc_gFT_apply(stancil, prior):
# check sizes and adjust if necessary.
if x_pos.size > I["model_error_x"].size:
x_pos = x_pos[0 : I["model_error_x"].size]
print("adjust x")
_logger.debug("adjust x")
elif x_pos.size < I["model_error_x"].size:
I["model_error_x"] = I["model_error_x"][0:-1]
print("adjust y")
_logger.debug("adjust y")

x_err[x_pos] = I["model_error_x"]
model_error_x[xi] = xr.DataArray(
@@ -657,10 +658,10 @@ def get_stancil_var_apply(stancil):

stancil_weighted_variance = np.nansum(np.array(stancil_vars)) / Nphotons

print("Parcevals Theorem:")
print("variance of timeseries: ", DATA.var())
print("mean variance of stancils: ", stancil_weighted_variance)
print("variance of the optimzed windowed LS Spectrum: ", self.calc_var())
_logger.debug("Parcevals Theorem:")
_logger.debug("variance of timeseries: %s", DATA.var())
_logger.debug("mean variance of stancils: %s", stancil_weighted_variance)
_logger.debug("variance of the optimzed windowed LS Spectrum: %s", self.calc_var())

if add_attrs:
self.G.attrs["variance_unweighted_data"] = DATA.var()
@@ -874,7 +875,7 @@ def get_stats(self, dk, Nx_full, print_flag=False):
"var_spec_complete",
"spec_adjust",
]:
- print(ki.ljust(20) + str(pars[ki]))
+ _logger.debug("%s", ki.ljust(20) + str(pars[ki]))

return pars

@@ -972,7 +973,7 @@ def runningmean(self, var, m, tailcopy=False):
m = int(m)
s = var.shape
if s[0] <= 2 * m:
print("0 Dimension is smaller then averaging length")
_logger.debug("0 Dimension is smaller then averaging length")
return
rr = np.asarray(var) * np.nan

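Note the conversion style used throughout this file: comma-separated print() arguments become a single %s-style format string plus arguments. With %-style logging the arguments are still evaluated at the call site, but the string interpolation itself is deferred until a handler actually emits the record, so disabled DEBUG calls avoid the formatting cost. A small sketch of the difference (the timing value is illustrative):

```python
import logging
import time

_logger = logging.getLogger(__name__)

ta = time.perf_counter()

# Eager: the f-string is always built, even when DEBUG is disabled.
_logger.debug(f"compute time solve : {time.perf_counter() - ta}")

# Lazy: the argument is still evaluated, but the %-interpolation is
# deferred until a handler actually emits the record.
_logger.debug("compute time solve : %s", time.perf_counter() - ta)
```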
30 changes: 17 additions & 13 deletions src/icesat2waves/tools/iotools.py
@@ -1,3 +1,4 @@
+ import logging
import os
import re
import json
@@ -17,6 +18,9 @@
import icesat2waves.tools.convert_GPS_time as cGPS


+ _logger = logging.getLogger(__name__)


def init_from_input(arguments):
"""
Initializes the variables track_name, batch_key, and test_flag based on the input arguments.
@@ -32,7 +36,7 @@ def init_from_input(arguments):
track_name = "20190605061807_10380310_004_01"
batch_key = "SH_batch01"
test_flag = True
print("use standard values")
_logger.debug("use standard values")

else:
track_name = arguments[1]
@@ -65,7 +69,7 @@ def init_data(ID_name, batch_key, ID_flag, ID_root, prefix="A01b_ID"):

"""

- print(ID_name, batch_key, ID_flag)
+ _logger.debug("id name: %s, batch key: %s, id flag: %s", ID_name, batch_key, ID_flag)
hemis, batch = batch_key.split("_")

if ID_flag:
@@ -98,13 +102,13 @@ def get_atl06p(ATL03_track_name, params_yapc, maximum_height):
Returns:
geopandas.GeoDataFrame: The geodataframe containing the ATL06 data.
"""
print("Retrieving data from sliderule ...")
_logger.debug("Retrieving data from sliderule ...")
gdf = icesat2.atl06p(params_yapc, resources=[ATL03_track_name])

if gdf.empty:
raise ValueError("Empty Geodataframe. No data could be retrieved.")

print("Initial data retrieved")
_logger.debug("Initial data retrieved")
gdf = sct.correct_and_remove_height(gdf, maximum_height)
return gdf

@@ -264,7 +268,7 @@ def nsidc_icesat2_get_associated_file(
remote_names = []

for input_file in file_list:
- # print(input_file)
+ # _logger.debug(input_file)
# -- extract parameters from ICESat-2 ATLAS HDF5 file name
SUB, PRD, HEM, YY, MM, DD, HH, MN, SS, TRK, CYC, GRN, RL, VRS = rx.findall(
input_file
Expand All @@ -288,10 +292,10 @@ def nsidc_icesat2_get_associated_file(
colnames, collastmod, colerror = icesat2_toolkit.utilities.nsidc_list(
PATH, build=False, timeout=TIMEOUT, parser=parser, pattern=R1, sort=True
)
- print(colnames)
+ _logger.debug("colnames: %s", colnames)
# -- print if file was not found
if not colnames:
- print(colerror)
+ _logger.debug("colerror: %s", colerror)
continue
# -- add to lists
for colname, remote_mtime in zip(colnames, collastmod):
@@ -312,7 +316,7 @@ def json_load(name, path, verbose=False):
with open(full_name, "r") as ifile:
data = json.load(ifile)
if verbose:
print("loaded from: ", full_name)
_logger.debug("loaded from: %s", full_name)
return data


@@ -328,7 +332,7 @@ def ATL03_download(username, password, dpath, product_directory, sd, file_name):
"""

HOST = ["https://n5eil01u.ecs.nsidc.org", "ATLAS", product_directory, sd, file_name]
print("download to:", dpath + "/" + HOST[-1])
_logger.debug("download to: %s", dpath + "/" + HOST[-1])
Member:
Fewer characters, same result. Thoughts?

Suggested change:
- _logger.debug("download to: %s", dpath + "/" + HOST[-1])
+ _logger.debug("download to: %s", dpath"/"HOST[-1])

Member Author:
Wow, that works? It looks like a syntax error to me. I didn't know you could concatenate strings that way. How does Black reformat it? I'm not super keen – I don't think the suggestion makes the syntax any clearer.

Member:
Maybe it doesn't work. I guess it only works with literals.
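The follow-up is right: implicit concatenation is a compile-time fusion of adjacent string literals only, so the suggested dpath"/"HOST[-1] would raise a SyntaxError. A quick illustration:

```python
# Adjacent string *literals* are fused at compile time:
host = "https://n5eil01u.ecs.nsidc.org" "/" "ATLAS"  # valid

# A literal next to a name is not:
# path = dpath "/" HOST[-1]  # SyntaxError

# Non-literals need an explicit operation, e.g. + or str.join():
path = "/".join(["downloads", "ATL03_example.h5"])  # hypothetical names
```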

buffer, error = icesat2_toolkit.utilities.from_nsidc(
HOST,
username=username,
@@ -389,7 +393,7 @@ def write_track_to_HDF5(data_dict, name, path, verbose=False, mode="w"):
store1[kk] = I

if verbose:
print(f"saved at: {full_name}")
_logger.debug("saved at: %s", full_name)


def get_time_for_track(delta_time, atlas_epoch):
@@ -465,7 +469,7 @@ def getATL03_beam(fileT, numpy=False, beam="gt1l", maxElev=1e6):
mask_total = mask_seaice | mask_ocean

if sum(~mask_total) == beam_data[:, 1].size:
print("zero photons, lower photon quality to 2 or higher")
_logger.debug("zero photons, lower photon quality to 2 or higher")
# lower quality threshold and recompute
quality_threshold = 1
mask_ocean = beam_data[:, 1] > quality_threshold
Expand Down Expand Up @@ -506,8 +510,8 @@ def getATL03_beam(fileT, numpy=False, beam="gt1l", maxElev=1e6):
}
)
# Filter out high elevation values
print("seg_dist shape ", segment_dist_x.shape)
print("df shape ", dF.shape)
_logger.debug("seg_dist shape %s", segment_dist_x.shape)
_logger.debug("df shape %s", dF.shape)

dF = dF[mask_total]
return dF, dF_seg
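These module loggers only emit if the application configures logging; with no handlers and the default WARNING root level, all of these debug() calls are silent. A minimal entry-point configuration sketch (the format string is an assumption, not part of this PR):

```python
import logging

# Route DEBUG records from the whole package to stderr with timestamps.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)

# Or quiet a single noisy subpackage:
logging.getLogger("icesat2waves.tools").setLevel(logging.INFO)
```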
12 changes: 8 additions & 4 deletions src/icesat2waves/tools/sliderule_converter_tools.py
@@ -1,3 +1,5 @@
+ import logging

from ipyleaflet import (
Map,
basemaps,
@@ -11,6 +13,8 @@

import matplotlib.pyplot as plt

+ _logger = logging.getLogger(__name__)


# height correction tools
def correct_and_remove_height(Gi, height_limit):
@@ -362,11 +366,11 @@ def check_RGT_in_domain(Gtrack_lowest, gdf):
interect_list = list(result)
interect_list.sort()

print("RGT in domain: ", len(Gtrack_lowest["RGT"].unique()))
print("RGT with data found: ", len(gdf_list))
print("RGT in both: ", len(interect_list))
_logger.debug("RGT in domain: %s", len(Gtrack_lowest["RGT"].unique()))
_logger.debug("RGT with data found: %s", len(gdf_list))
_logger.debug("RGT in both: %s", len(interect_list))
if len(interect_list) != len(gdf_list):
print("RGT not in both: ", list(set(gdf_list) - result))
_logger.debug("RGT not in both: %s", list(set(gdf_list) - result))
return interect_list


Expand Down