chore: logging 3 - remove prints from tools directory #177

Merged · 16 commits · Mar 21, 2024
Changes from 10 commits
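All four files below get the same treatment: each module creates one logger at import time and replaces print() calls with lazy %-style _logger.debug() calls, so string interpolation only happens when DEBUG output is actually enabled. A minimal sketch of the pattern (the function and message here are illustrative, not taken from the diff):

import logging

_logger = logging.getLogger(__name__)

def summarize(records):
    # Lazy %-formatting: len(records) is only interpolated into the
    # message string if a handler actually accepts DEBUG records.
    _logger.debug("summarizing %s records", len(records))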
22 changes: 13 additions & 9 deletions src/icesat2_tracks/ICEsat2_SI_tools/angle_optimizer.py
@@ -1,10 +1,14 @@
"""
This library contains methods and classes used to search for the best angle given x,y data using single-frequency fits.
"""
+import logging

from numba import jit
import numpy as np


+_logger = logging.getLogger(__name__)

numba_parallel = False

def get_wavenumbers_polar( amp, angle_rad):
@@ -196,9 +200,9 @@ def set_parameters(self, par_dict, verbose= False):
self.seeds = np.vstack([pxx.flatten(), pyy.flatten() ]).T
self.params = params
if verbose:
-print('Nwalker: ', self.nwalkers)
-print('Seeds: ', self.seeds.shape)
-print(self.params)
+_logger.debug('Nwalker: %s', self.nwalkers)
+_logger.debug('Seeds: %s', self.seeds.shape)
+_logger.debug("Params: %s", self.params)

def test_objective_func(self):
return self.objective_func(self.params, *self.fitting_args, **self.fitting_kargs)
@@ -212,8 +216,8 @@ def sample(self, fitting_args= None , method='emcee', steps=100, verbose= True,
args=fitting_args, kws=fitting_kargs ,
nwalkers=self.nwalkers, steps=steps, pos= self.seeds,nan_policy='omit' , **kargs)
if verbose:
-print(self.LM.report_fit(self.fitter))
-print('results at self.fitter')
+_logger.debug("%s", self.LM.fit_report(self.fitter))
+_logger.debug('results at self.fitter')

def plot_sample(self, **kargs ):
import matplotlib.pyplot as plt
@@ -232,8 +236,8 @@ def optimize(self, fitting_args= None , method='dual_annealing', verbose= True):
self.fitter_optimize = self.LM.minimize(self.objective_func, self.params, method=method,
args=fitting_args, kws=fitting_kargs )
if verbose:
-print(self.LM.report_fit(self.fitter_optimize))
-print('results at self.fitter_optimize')
+_logger.debug("fit report: %s", self.LM.fit_report(self.fitter_optimize))
+_logger.debug('results at self.fitter_optimize')

def plot_optimze(self, **kargs):
import matplotlib.pyplot as plt
@@ -249,8 +253,8 @@ def brute(self, fitting_args= None , method='brute', verbose= True, N_grid = 30)
args=fitting_args, kws=fitting_kargs, Ns=N_grid )

if verbose:
-print(self.LM.report_fit(self.fitter_brute))
-print('results at self.fitter_brute')
+_logger.debug("%s", self.LM.fit_report(self.fitter_brute))
+_logger.debug('results at self.fitter_brute')


def plot_brute(self, clevel = np.linspace(-3.2, 3.2, 30), **kargs):
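Note that every replacement in this PR logs at DEBUG level, so the verbose= flags in these methods only produce visible output once the application configures logging; a minimal sketch, assuming no handler is configured elsewhere:

import logging

# Route DEBUG records from these module loggers to stderr.
logging.basicConfig(level=logging.DEBUG)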
6 changes: 5 additions & 1 deletion src/icesat2_tracks/ICEsat2_SI_tools/beam_stats.py
@@ -1,3 +1,5 @@
+import logging

import numpy as np
import pandas as pd
import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec
@@ -6,6 +8,8 @@
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

+_logger = logging.getLogger(__name__)


def derive_beam_statistics(Gd, all_beams, Lmeter=10e3, dx=10):
"""
@@ -24,7 +28,7 @@ def derive_beam_statistics(Gd, all_beams, Lmeter=10e3, dx=10):
elif isinstance(Gd, h5py.File):
Gi = io_local.get_beam_hdf_store(Gd[k])
else:
print("Gd is neither dict nor hdf5 file")
_logger.debug("Gd is neither dict nor hdf5 file")
break

dd = Gi["h_mean"]
8 changes: 5 additions & 3 deletions src/icesat2_tracks/ICEsat2_SI_tools/filter_regrid.py
@@ -1,7 +1,9 @@
+import logging

import numpy as np
from numba import jit

+_logger = logging.getLogger(__name__)

def correct_heights(T03, T03c, coord = 'delta_time'):
"""
@@ -37,7 +39,7 @@ def track_pole_ward_file(hdf5_file, product='ALT03'):
T_lat = hdf5_file['gt1r/freeboard_beam_segment/latitude'][:]
T_time = hdf5_file['gt1r/freeboard_beam_segment/delta_time'][:]
#return ( T_lat[T_time.argmax()] - T_lat[T_time.argmin()] ) < 0
-print('1st lat =' + str(abs(T_lat[T_time.argmin()])) , ';last lat =' + str(abs(T_lat[T_time.argmax()])) )
+_logger.debug('1st lat = %s; last lat = %s', abs(T_lat[T_time.argmin()]), abs(T_lat[T_time.argmax()]))
return abs(T_lat[T_time.argmax()]) > abs(T_lat[T_time.argmin()])


@@ -175,7 +177,7 @@ def derive_axis(TT, lat_lims = None):
# decending track
lon_min = TT['lons'].min()

-#print(lon_min)
+#_logger.debug(lon_min)
TT['x'] = (TT['lons'] - lon_min) * np.cos( TT['lats']*np.pi/180.0 ) * dy
#TT['x'] = (TT['lons'] ) * np.cos( TT['lats']*np.pi/180.0 ) * dy
TT['dist'] = np.sqrt(TT['x']**2 + TT['y']**2)
@@ -448,7 +450,7 @@ def bin_means(T2, dist_grid):

for i in np.arange(1,ilim-1, 1):
if i % 5000 ==0:
-print(i)
+_logger.debug("i= %s", i)
i_mask=(T2['dist'] >= dist_grid[i-1]) & (T2['dist'] < dist_grid[i+1])
#if ( (T2['dist'] >= dist_grid[i-1]) & (T2['dist'] < dist_grid[i+1]) ).sum() > 0:
dF_mean[i] = T2[i_mask].mean()
53 changes: 27 additions & 26 deletions src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
@@ -1,4 +1,5 @@
import copy
+import logging
import time

from numpy import linalg
@@ -13,6 +14,8 @@
from icesat2_tracks.ICEsat2_SI_tools import lanczos, spectral_estimates as spec
import icesat2_tracks.local_modules.JONSWAP_gamma as spectal_models

+_logger = logging.getLogger(__name__)


def rebin(data, dk):
"""
@@ -256,7 +259,7 @@ def calc_gFT_apply(stancil, prior):
if (
x.size / Lpoints < 0.40
): # if there are not enough photons, set results to nan
-print(" -- data density to low, skip stancil")
+_logger.debug(" -- data density too low, skip stancil")
return {
"stancil_center": stancil[1],
"p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]),
@@ -288,7 +291,7 @@ def calc_gFT_apply(stancil, prior):
# define error
err = ERR[x_mask] if ERR is not None else 1

print("compute time weights : ", time.perf_counter() - ta)
_logger.debug("compute time weights : %s", time.perf_counter() - ta)

ta = time.perf_counter()
FT.define_problem(weight, err)
@@ -297,17 +300,15 @@
p_hat = FT.solve()

if np.isnan(np.mean(p_hat)):
print(" -- inversion nan!")
print(" -- data fraction", x.size / Lpoints)
print(
" -- weights:",
_logger.debug(" -- inversion nan!")
_logger.debug(" -- data fraction %s", x.size / Lpoints)
_logger.debug(
" -- weights: %s err: %s y: %s",
np.mean(weight),
"err:",
np.mean(err),
"y:",
np.mean(y),
)
print(" -- skip stancil")
_logger.debug(" -- skip stancil")
return {
"stancil_center": stancil[1],
"p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]),
@@ -320,7 +321,7 @@ def calc_gFT_apply(stancil, prior):
"spec_adjust": np.nan,
}

print("compute time solve : ", time.perf_counter() - ta)
_logger.debug("compute time solve : %s", time.perf_counter() - ta)
ta = time.perf_counter()

x_pos = (np.round((x - stancil[0]) / self.dx, 0)).astype("int")
@@ -337,7 +338,7 @@ def calc_gFT_apply(stancil, prior):
for k, I in prior_pars.items():
inverse_stats[k] = I.value if hasattr(I, "value") else np.nan

print("compute time stats : ", time.perf_counter() - ta)
_logger.debug("compute time stats : %s", time.perf_counter() - ta)

# multiply with the standard deviation of the data to get dimensions right
PSD = power_from_model(p_hat, dk, self.k.size, x.size, Lpoints)
@@ -366,7 +367,7 @@ def calc_gFT_apply(stancil, prior):
plt.legend()
plt.show()

print("---------------------------------")
_logger.debug("---------------------------------")

# return dict with all relevant data
return_dict = {
@@ -397,18 +398,18 @@ def calc_gFT_apply(stancil, prior):
N_stencil = len(self.stancil_iter_list.T)
Ni = 1
for ss in copy.copy(self.stancil_iter):
print(Ni, "/", N_stencil, "Stancils")
_logger.debug("%s / %s Stancils", Ni, N_stencil)
# prior step
if prior[0] is False: # make NL fit if priors do not exist
-print("1st step: with NL-fit")
+_logger.debug("1st step: with NL-fit")
I_return = calc_gFT_apply(ss, prior=prior)
prior = I_return["PSD"], I_return["weight"]
# 2nd step
if prior[0] is False:
print("1st GFD failed (priors[0]=false), skip 2nd step")
_logger.debug("1st GFD failed (priors[0]=false), skip 2nd step")
else:
print("2nd step: use set priors:", type(prior[0]), type(prior[1]))
print(prior[0][0:3], prior[1][0:3])
_logger.debug("2nd step: use set priors of types: %s %s", type(prior[0]), type(prior[1]))
_logger.debug("first three elements of priors: %s %s", prior[0][0:3], prior[1][0:3])
I_return = calc_gFT_apply(ss, prior=prior)
prior = I_return["PSD"], I_return["weight"]

@@ -471,7 +472,7 @@ def calc_gFT_apply(stancil, prior):
N_per_stancil.append(I["x_size"])
Spec_adjust_per_stancil.append(spec_adjust)

print("# of x-coordinates" + str(len(Spec_returns)))
_logger.debug("# of x-coordinates %s", len(Spec_returns))

self.N_per_stancil = N_per_stancil
chunk_positions = np.array(list(D_specs.keys()))
@@ -578,10 +579,10 @@ def calc_gFT_apply(stancil, prior):
# check sizes and adjust if necessary.
if x_pos.size > I["model_error_x"].size:
x_pos = x_pos[0 : I["model_error_x"].size]
print("adjust x")
_logger.debug("adjust x")
elif x_pos.size < I["model_error_x"].size:
I["model_error_x"] = I["model_error_x"][0:-1]
print("adjust y")
_logger.debug("adjust y")

x_err[x_pos] = I["model_error_x"]
model_error_x[xi] = xr.DataArray(
@@ -657,10 +658,10 @@ def get_stancil_var_apply(stancil):

stancil_weighted_variance = np.nansum(np.array(stancil_vars)) / Nphotons

print("Parcevals Theorem:")
print("variance of timeseries: ", DATA.var())
print("mean variance of stancils: ", stancil_weighted_variance)
print("variance of the optimzed windowed LS Spectrum: ", self.calc_var())
_logger.debug("Parcevals Theorem:")
_logger.debug("variance of timeseries: %s", DATA.var())
_logger.debug("mean variance of stancils: %s", stancil_weighted_variance)
_logger.debug("variance of the optimzed windowed LS Spectrum: %s", self.calc_var())

if add_attrs:
self.G.attrs["variance_unweighted_data"] = DATA.var()
@@ -874,7 +875,7 @@ def get_stats(self, dk, Nx_full, print_flag=False):
"var_spec_complete",
"spec_adjust",
]:
-print(ki.ljust(20) + str(pars[ki]))
+_logger.debug("%s", ki.ljust(20) + str(pars[ki]))

return pars

@@ -972,7 +973,7 @@ def runningmean(self, var, m, tailcopy=False):
m = int(m)
s = var.shape
if s[0] <= 2 * m:
print("0 Dimension is smaller then averaging length")
_logger.debug("0 Dimension is smaller then averaging length")
return
rr = np.asarray(var) * np.nan

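One caveat worth noting about the lazy %s style used throughout: it defers string interpolation, not argument evaluation, so an expensive argument such as the lmfit fit report above is still computed even when DEBUG is disabled. A sketch of the standard stdlib guard for that case (the report() helper below is hypothetical):

import logging

_logger = logging.getLogger(__name__)

def report(make_report):
    # isEnabledFor() lets us skip rendering the expensive report
    # entirely unless a DEBUG record would actually be emitted.
    if _logger.isEnabledFor(logging.DEBUG):
        _logger.debug("%s", make_report())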