From 57d9a22b946072d261dd8bfa62ab59cf438ba2d2 Mon Sep 17 00:00:00 2001
From: Camilo Diaz
Date: Tue, 26 Dec 2023 20:15:21 -0500
Subject: [PATCH] cleaning and formatting files

---
 .../ICEsat2_SI_tools/generalized_FT.py        | 962 ++++++++++--------
 .../analysis_db/B03_plot_spectra_ov.py        | 695 +++++++------
 2 files changed, 897 insertions(+), 760 deletions(-)

diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
index 1a2d2fcd..a8579e80 100644
--- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
+++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
@@ -1,43 +1,46 @@
-
 import numpy as np
 import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec
 import icesat2_tracks.ICEsat2_SI_tools.lanczos as lanczos
 
-def rebin(data, dk, return_edges =False):
+
+def rebin(data, dk, return_edges=False):
     """
     rebin data to a new k-grid with dk
     """
-    k_low_limits =data.k[::10]
-    Gmean = data.groupby_bins('k' , k_low_limits).mean()
-    k_low = (k_low_limits + k_low_limits.diff('k')[0]/2).data
-    Gmean['k_bins'] = k_low[0:-1]
-    Gmean = Gmean.rename({'k_bins': 'k'})
+    k_low_limits = data.k[::10]
+    Gmean = data.groupby_bins("k", k_low_limits).mean()
+    k_low = (k_low_limits + k_low_limits.diff("k")[0] / 2).data
+    Gmean["k_bins"] = k_low[0:-1]
+    Gmean = Gmean.rename({"k_bins": "k"})
 
     if return_edges:
         return Gmean, k_low_limits
    else:
        return Gmean
 
+
 # define weight function
 def smooth_data_to_weight(dd, m=150):
-
     """
     returns a weight function from smooth data
     dd is the data
     m is the number of points to smooth over
     """
-    dd_fake = np.ones( 4*m + dd.size)*dd.max()*0.01
-    dd_fake[2*m:-2*m]=dd
+    dd_fake = np.ones(4 * m + dd.size) * dd.max() * 0.01
+    dd_fake[2 * m : -2 * m] = dd
 
     weight = lanczos.lanczos_filter_1d_wrapping(np.arange(dd_fake.size), dd_fake, m)
-    #weight= M.runningmean_wrap_around(dd_fake, m=m)
-    weight=weight[2*m:-2*m]
-    weight=weight/weight.max()
+    # weight= M.runningmean_wrap_around(dd_fake, m=m)
+    weight = weight[2 * m : -2 * m]
+    weight = weight / weight.max()
 
     return weight
 
-def get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=False, method = 'gaussian' ):
+
+def get_weights_from_data(
+    x, y, dx, stancil, k, max_nfev, plot_flag=False, method="gaussian"
+):
     """
     x, y     x positions and y data on any (regular) grid; may contain gaps
    dx       grid spacing
@@ -47,13 +50,12 @@ def get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=False, metho
    returns:
    peak-normalized weights in the size of k
     """
-    #make y gridded
-    x_pos = (np.round( (x - stancil[0])/ dx -1 , 0) ).astype('int')
-    x_model = np.arange(stancil[0], stancil[-1], dx)
-    y_gridded = np.copy(x_model) * 0
+    # make y gridded
+    x_pos = (np.round((x - stancil[0]) / dx - 1, 0)).astype("int")
+    x_model = np.arange(stancil[0], stancil[-1], dx)
+    y_gridded = np.copy(x_model) * 0
     y_gridded[x_pos] = y
-    #nan_mask =np.isnan(y_gridded)
-
+    # nan_mask =np.isnan(y_gridded)
 
    # def gaus(x, x_0, amp, sigma_g ):
    #     return amp* np.exp(-0.5 * ( (x-x_0)/sigma_g)**2)
    # weight = weight *10+ weight.max()* 0.005 # add penalty floor

    # take FFT to get peak parameters
-    k_fft = np.fft.rfftfreq(x_model.size, d=dx) * 2* np.pi
-    f_weight= np.sqrt(9.81 * k_fft) / (2 *np.pi)
-    data_weight = spec.Z_to_power(np.fft.rfft(y_gridded), np.diff(f_weight).mean(), x_pos.size)
+    k_fft = np.fft.rfftfreq(x_model.size, d=dx) * 2 * np.pi
+    f_weight = np.sqrt(9.81 * k_fft) / (2 * np.pi)
+    data_weight = 
spec.Z_to_power( + np.fft.rfft(y_gridded), np.diff(f_weight).mean(), x_pos.size + ) - Spec_fft = get_prior_spec(f_weight, data_weight ) + Spec_fft = get_prior_spec(f_weight, data_weight) - pars = Spec_fft.set_parameters(flim= np.sqrt(9.81 * k[-1] ) /2/np.pi) - k_max = (pars['f_max'].value *2 *np.pi)**2/ 9.81 - #print('k_max ', k_max) + pars = Spec_fft.set_parameters(flim=np.sqrt(9.81 * k[-1]) / 2 / np.pi) + k_max = (pars["f_max"].value * 2 * np.pi) ** 2 / 9.81 + # print('k_max ', k_max) - - if method is 'gaussian': + if method == "gaussian": # simple gaussian weight - def gaus(x, x_0, amp, sigma_g ): - return amp* np.exp(-0.5 * ( (x-x_0)/sigma_g)**2) + def gaus(x, x_0, amp, sigma_g): + return amp * np.exp(-0.5 * ((x - x_0) / sigma_g) ** 2) - weight = gaus(k, k_max, 1 , 0.02)**(1/2) - #weight = weight *1+ weight.max()* 0.1 # add pemnalty floor + weight = gaus(k, k_max, 1, 0.02) ** (1 / 2) + # weight = weight *1+ weight.max()* 0.1 # add pemnalty floor params = None - elif method is 'parametric': - + elif method == "parametric": # JONSWAP weight - f= np.sqrt(9.81 * k) / (2 *np.pi) - #weight = weight + weight.max()* 0.1 # add pemnalty floor + f = np.sqrt(9.81 * k) / (2 * np.pi) + # weight = weight + weight.max()* 0.1 # add pemnalty floor # optimzes paramteric function to data - #Spec_fft.data = Spec_fft.runningmean(Spec_fft.data , 10, tailcopy=True) - #Spec_fft.data[np.isnan(Spec_fft.data)] = 0 + # Spec_fft.data = Spec_fft.runningmean(Spec_fft.data , 10, tailcopy=True) + # Spec_fft.data[np.isnan(Spec_fft.data)] = 0 - weight = Spec_fft.create_weight(freq = f, plot_flag= False, max_nfev=max_nfev) + weight = Spec_fft.create_weight(freq=f, plot_flag=False, max_nfev=max_nfev) if plot_flag: Spec_fft.fitter.params.pretty_print() params = Spec_fft.fitter.params - #weight = weight+ weight.max()* 0.05 # add pemnalty floor + # weight = weight+ weight.max()* 0.05 # add pemnalty floor else: raise ValueError(" 'method' must be either 'gaussian' or 'parametric' ") - if plot_flag: import matplotlib.pyplot as plt - #plt.plot(k_fft[1:], Spec_fft.model_func(Spec_fft.freq, pars), 'b--' ) - plt.plot(k_fft[1:], Spec_fft.data, c='gray',label='FFT for Prior', linewidth = 0.5) - plt.plot(k, weight, zorder=12, c='black' , label = 'Fitted model to FFT', linewidth = 0.5) - plt.xlim(k[0],k[-1] ) - #plt.show() + # plt.plot(k_fft[1:], Spec_fft.model_func(Spec_fft.freq, pars), 'b--' ) + plt.plot( + k_fft[1:], Spec_fft.data, c="gray", label="FFT for Prior", linewidth=0.5 + ) + plt.plot( + k, weight, zorder=12, c="black", label="Fitted model to FFT", linewidth=0.5 + ) + plt.xlim(k[0], k[-1]) + # plt.show() # add pemnalty floor - weight = weight + weight.max()* 0.1 + weight = weight + weight.max() * 0.1 # peak normlize weight - weight = weight/weight.max() + weight = weight / weight.max() return weight, params + def make_xarray_from_dict(D, name, dims, coords): import xarray as xr + D_return = dict() - for xi,I in D.items(): - coords['x'] = xi - D_return[xi] = xr.DataArray(I, dims=dims, coords=coords , name=name) + for xi, I in D.items(): + coords["x"] = xi + D_return[xi] = xr.DataArray(I, dims=dims, coords=coords, name=name) return D_return + def define_weights(stancil, prior, x, y, dx, k, max_nfev, plot_flag=False): """ defines weights for the inversion, either from the data or from the prior, or a mix return weights normalized to 1, prior_pars used for the next iteration """ - if (type(prior[0]) is bool) and not prior[0] : # prior = (False, None), this is the first iteration + if (type(prior[0]) is bool) and not prior[ + 0 + 
    ]:  # prior = (False, None), this is the first iteration
        # fit function to data
-        weight, prior_pars = get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=plot_flag, method='parametric')
-        #weight_name = "10 * $P_{init}$ from FFT"
+        weight, prior_pars = get_weights_from_data(
+            x, y, dx, stancil, k, max_nfev, plot_flag=plot_flag, method="parametric"
+        )
+        # weight_name = "10 * $P_{init}$ from FFT"
         weight_name = "$P_{init}$ from FFT"
-    elif (type(prior) is tuple): # prior= (PSD_from_GFT, weight_used in inversion), this is all other first iteration
+    elif (
+        type(prior) is tuple
+    ):  # prior = (PSD_from_GFT, weight used in inversion); used on all following calls
        # combine old and new weights
         weight = 0.2 * smooth_data_to_weight(prior[0]) + 0.8 * prior[1]
-        #weight_name = "10 * smth. $P_{i-1}$"
+        # weight_name = "10 * smth. $P_{i-1}$"
         weight_name = "smth. $P_{i-1}$"
-        prior_pars = {'alpha': None, 'amp': None, 'f_max': None, 'gamma':None}
-    else: # prior = weight, this is all other iterations
+        prior_pars = {"alpha": None, "amp": None, "f_max": None, "gamma": None}
+    else:  # prior = weight, this is all other iterations
         weight = smooth_data_to_weight(prior)
         weight_name = "smth. from data"
-        prior_pars = {'alpha': None, 'amp': None, 'f_max': None, 'gamma':None}
+        prior_pars = {"alpha": None, "amp": None, "f_max": None, "gamma": None}
 
     if plot_flag:
         import matplotlib.pyplot as plt
-        plt.plot(k, weight, zorder=12, c='darkgreen', linewidth = 0.8,label = weight_name)
+
+        plt.plot(k, weight, zorder=12, c="darkgreen", linewidth=0.8, label=weight_name)
 
    # normalize weights by the std of the data
-    weight = weight/ weight.std()
+    weight = weight / weight.std()
 
     return weight, prior_pars
-
+
+
 class wavenumber_spectrogram_gFT(object):
-    def __init__(self, x, data, L, dx, wavenumber, data_error = None, ov=None):
+    def __init__(self, x, data, L, dx, wavenumber, data_error=None, ov=None):
         """
        returns a wavenumber spectrogram with the resolution L-ov
        this uses Lomb-Scargle
@@ -177,21 +193,31 @@ def __init__(self, x, data, L, dx, wavenumber, data_error = None, ov=None):
        other attributes are in the .attr dict.
""" - self.Lmeters = L - self.ov = int(L/2) if ov is None else ov #when not defined in create_chunk_boundaries then L/2 + self.Lmeters = L + self.ov = ( + int(L / 2) if ov is None else ov + ) # when not defined in create_chunk_boundaries then L/2 - self.x = x - self.dx = dx - self.data = data + self.x = x + self.dx = dx + self.data = data self.error = data_error if data_error is not None else None - self.Lpoints= int(self.Lmeters/self.dx) + self.Lpoints = int(self.Lmeters / self.dx) # create subsample k - self.k, self.dk = wavenumber, np.diff(wavenumber).mean() - - - def cal_spectrogram(self, x = None, data=None, error=None, name=None, xlims =None, max_nfev = None, map_func=None, plot_flag = False): - + self.k, self.dk = wavenumber, np.diff(wavenumber).mean() + + def cal_spectrogram( + self, + x=None, + data=None, + error=None, + name=None, + xlims=None, + max_nfev=None, + map_func=None, + plot_flag=False, + ): """ defines apply function and calculated all sub-sample sprectra using map @@ -214,23 +240,20 @@ def cal_spectrogram(self, x = None, data=None, error=None, name=None, xlims =Non import copy import pandas as pd - X = self.x if x is None else x # all x positions - DATA = self.data if data is None else data # all data points - ERR = self.error if error is None else error # all error for points - Lmeters, dk = self.Lmeters, self.dk + X = self.x if x is None else x # all x positions + DATA = self.data if data is None else data # all data points + ERR = self.error if error is None else error # all error for points + Lmeters, dk = self.Lmeters, self.dk Lpoints = self.Lpoints - Lpoints_full = int(Lmeters/self.dx) - #win = self.win - self.xlims = ( np.round(X.min()), X.max() ) if xlims is None else xlims + Lpoints_full = int(Lmeters / self.dx) + # win = self.win + self.xlims = (np.round(X.min()), X.max()) if xlims is None else xlims # init Lomb scargle object with noise as nummy data () - #dy_fake= np.random.randn(len(dy))*0.001 if self.dy is not None else None - #self.LS = LombScargle(X[0:L] , np.random.randn(L)*0.001, fit_mean=True) - - + # dy_fake= np.random.randn(len(dy))*0.001 if self.dy is not None else None + # self.LS = LombScargle(X[0:L] , np.random.randn(L)*0.001, fit_mean=True) def calc_gFT_apply(stancil, prior): - """ windows the data accoding to stencil and applies LS spectrogram returns: stancil center, spectrum for this stencil, number of datapoints in stancil @@ -238,238 +261,287 @@ def calc_gFT_apply(stancil, prior): from scipy.signal import detrend import matplotlib.pyplot as plt import time + ta = time.perf_counter() - #x = X[stancil[0]:stancil[-1]] - x_mask= (stancil[0] <= X) & (X <= stancil[-1]) + # x = X[stancil[0]:stancil[-1]] + x_mask = (stancil[0] <= X) & (X <= stancil[-1]) print(stancil[1]) x = X[x_mask] - if x.size/Lpoints < 0.1: # if there are not enough photos set results to nan - #return stancil[1], self.k*np.nan, np.fft.rfftfreq( int(self.Lpoints), d=self.dx)*np.nan, x.size - #return stancil[1], np.concatenate([self.k*np.nan , self.k*np.nan]), np.nan, np.nan, np.nan, x.size, False, False - return { 'stancil_center': stancil[1], - 'p_hat': np.concatenate([self.k*np.nan , self.k*np.nan]), - 'inverse_stats': np.nan, - 'y_model_grid': np.nan, - 'y_data_grid': np.nan, - 'x_size': x.size, - 'PSD': False, - 'weight': False, - 'spec_adjust': np.nan} - + if ( + x.size / Lpoints < 0.1 + ): # if there are not enough photos set results to nan + # return stancil[1], self.k*np.nan, np.fft.rfftfreq( int(self.Lpoints), d=self.dx)*np.nan, x.size + # return stancil[1], 
np.concatenate([self.k*np.nan , self.k*np.nan]), np.nan, np.nan, np.nan, x.size, False, False + return { + "stancil_center": stancil[1], + "p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]), + "inverse_stats": np.nan, + "y_model_grid": np.nan, + "y_data_grid": np.nan, + "x_size": x.size, + "PSD": False, + "weight": False, + "spec_adjust": np.nan, + } y = DATA[x_mask] y_var = y.var() FT = generalized_Fourier(x, y, self.k) - #H = FT.get_H() + # H = FT.get_H() if plot_flag: import matplotlib.pyplot as plt - plt.figure(figsize=(3.34, 1.8),dpi=300) + + plt.figure(figsize=(3.34, 1.8), dpi=300) # define weights. Weights are normalized to 1 - weight, prior_pars =define_weights(stancil, prior, x, y, self.dx, self.k , max_nfev, plot_flag=plot_flag) + weight, prior_pars = define_weights( + stancil, prior, x, y, self.dx, self.k, max_nfev, plot_flag=plot_flag + ) # rescale weights to 80% of the variance of the data - weight = weight * 0.8 * y_var + weight = weight * 0.8 * y_var # define error err = ERR[x_mask] if ERR is not None else 1 - - print( 'weights : ', time.perf_counter() - ta) + print("weights : ", time.perf_counter() - ta) ta = time.perf_counter() - FT.define_problem(weight, err) # 1st arg is Penalty, 2nd is error + FT.define_problem(weight, err) # 1st arg is Penalty, 2nd is error # solve problem: p_hat = FT.solve() - print( 'solve : ', time.perf_counter() - ta) + print("solve : ", time.perf_counter() - ta) ta = time.perf_counter() - x_pos = (np.round( (x - stancil[0])/ self.dx , 0) ).astype('int') - eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters/2 - y_model_grid = np.copy(eta) *np.nan - y_model_grid[x_pos] = FT.model() # returns dimensional model + x_pos = (np.round((x - stancil[0]) / self.dx, 0)).astype("int") + eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters / 2 + y_model_grid = np.copy(eta) * np.nan + y_model_grid[x_pos] = FT.model() # returns dimensional model # save data on this grid as well - y_data_grid = np.copy(eta) *np.nan + y_data_grid = np.copy(eta) * np.nan y_data_grid[x_pos] = y inverse_stats = FT.get_stats(self.dk, Lpoints_full, print_flag=True) # add fitting parameters of Prior to stats dict - for k,I in prior_pars.items(): + for k, I in prior_pars.items(): try: inverse_stats[k] = I.value except: inverse_stats[k] = np.nan - - print( 'stats : ', time.perf_counter() - ta) + + print("stats : ", time.perf_counter() - ta) # Z = complex_represenation(p_hat, FT.M, Lpoints ) # multiply with the standard deviation of the data to get dimensions right - PSD = power_from_model(p_hat, dk, self.k.size, x.size, Lpoints) #Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) - - if self.k.size*2 > x.size: - col = 'red' + PSD = power_from_model( + p_hat, dk, self.k.size, x.size, Lpoints + ) # Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) + + if self.k.size * 2 > x.size: + col = "red" else: - col= 'blue' + col = "blue" if plot_flag: - #PSD_nondim = power_from_model(p_hat , dk, self.k.size, x.size, Lpoints) #Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) - plt.plot(self.k, PSD, color=col , label= 'GFT fit', linewidth = 0.5) - plt.title( 'non-dim Spectral Segment Models, 2M='+ str(self.k.size*2) + ', N='+ str(x.size) +'\n@ $X_i=$'+str(round(stancil[1]/1e3, 1)) +'km' , loc='left', size=6) - plt.xlim(self.k[0],self.k[-1]) - plt.xlabel('Wavenumber k') - plt.ylabel('Power (m^2/k)') + # PSD_nondim = power_from_model(p_hat , dk, self.k.size, x.size, Lpoints) #Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) + plt.plot(self.k, PSD, color=col, label="GFT fit", linewidth=0.5) 
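+            # red marks an underdetermined segment: the 2M cos/sin coefficients
+            # outnumber the N data points and the solution leans on the prior
+            # weights; blue marks the well-constrained case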
+ plt.title( + "non-dim Spectral Segment Models, 2M=" + + str(self.k.size * 2) + + ", N=" + + str(x.size) + + "\n@ $X_i=$" + + str(round(stancil[1] / 1e3, 1)) + + "km", + loc="left", + size=6, + ) + plt.xlim(self.k[0], self.k[-1]) + plt.xlabel("Wavenumber k") + plt.ylabel("Power (m^2/k)") plt.legend() plt.show() - print('---------------------------------') + print("---------------------------------") # return dict with all relevant data return_dict = { - 'stancil_center': stancil[1], - 'p_hat': p_hat, - 'inverse_stats': inverse_stats, - 'y_model_grid': y_model_grid, - 'y_data_grid': y_data_grid, - 'x_size': x.size, - 'PSD': PSD, - 'weight': weight, - 'spec_adjust': inverse_stats['spec_adjust'] + "stancil_center": stancil[1], + "p_hat": p_hat, + "inverse_stats": inverse_stats, + "y_model_grid": y_model_grid, + "y_data_grid": y_data_grid, + "x_size": x.size, + "PSD": PSD, + "weight": weight, + "spec_adjust": inverse_stats["spec_adjust"], } return return_dict - # stancil[1], p_hat, - # inverse_stats, y_model_grid , - # y_data_grid, x.size, - # PSD, weight, + # stancil[1], p_hat, + # inverse_stats, y_model_grid , + # y_data_grid, x.size, + # PSD, weight, # inverse_stats['spec_adjust'] - # % derive L2 stancil - self.stancil_iter = spec.create_chunk_boundaries_unit_lengths(Lmeters, self.xlims, ov= self.ov, iter_flag=True) - #stancil_iter = create_chunk_boundaries_unit_lengths(L, ( np.round(X.min()), X.max() ), ov= self.ov, iter_flag=True) + self.stancil_iter = spec.create_chunk_boundaries_unit_lengths( + Lmeters, self.xlims, ov=self.ov, iter_flag=True + ) + # stancil_iter = create_chunk_boundaries_unit_lengths(L, ( np.round(X.min()), X.max() ), ov= self.ov, iter_flag=True) # apply func to all stancils - Spec_returns=list() + Spec_returns = list() # form: PSD_from_GFT, weight_used in inversion - prior= False, False + prior = False, False for ss in copy.copy(self.stancil_iter): - #print(ss) - #prior= False, False + # print(ss) + # prior= False, False # prior step - if prior[0] is False: # make NL fit of piors do not exist - print('1st step with NL-fit') + if prior[0] is False: # make NL fit of piors do not exist + print("1st step with NL-fit") I_return = calc_gFT_apply(ss, prior=prior) - prior = I_return['PSD'], I_return['weight'] #I_return[6], I_return[7] + prior = I_return["PSD"], I_return["weight"] # I_return[6], I_return[7] # 2nd step if prior[0] is False: - print('priors still false skip 2nd step') + print("priors still false skip 2nd step") else: - print('2nd step use set priors:', type(prior[0]), type(prior[0]) ) + print("2nd step use set priors:", type(prior[0]), type(prior[0])) I_return = calc_gFT_apply(ss, prior=prior) - prior = I_return['PSD'], I_return['weight'] # I_return[6], I_return[7] - - #print(I_return[6]) - Spec_returns.append( dict((k, I_return[k]) for k in ('stancil_center', 'p_hat', 'inverse_stats', 'y_model_grid', 'y_data_grid', 'x_size', 'spec_adjust', 'weight'))) - #Spec_returns.append( [I_return[0],I_return[1],I_return[2],I_return[3],I_return[4],I_return[5]] ) + prior = I_return["PSD"], I_return["weight"] # I_return[6], I_return[7] + + # print(I_return[6]) + Spec_returns.append( + dict( + (k, I_return[k]) + for k in ( + "stancil_center", + "p_hat", + "inverse_stats", + "y_model_grid", + "y_data_grid", + "x_size", + "spec_adjust", + "weight", + ) + ) + ) + # Spec_returns.append( [I_return[0],I_return[1],I_return[2],I_return[3],I_return[4],I_return[5]] ) # map_func = map if map_func is None else map_func # print(map_func) # Spec_returns = list(map_func( calc_gFT_apply, 
copy.copy(self.stancil_iter) )) # # linear version - #Spec_returns = list(map( calc_spectrum_and_field_apply, copy.copy(self.stancil_iter) )) + # Spec_returns = list(map( calc_spectrum_and_field_apply, copy.copy(self.stancil_iter) )) # unpack resutls of the mapping: - GFT_model = dict() - Z_model = dict() + GFT_model = dict() + Z_model = dict() - D_specs = dict() - D_specs_model = dict() + D_specs = dict() + D_specs_model = dict() - Pars = dict() - y_model = dict() - y_data = dict() - N_per_stancil = list() + Pars = dict() + y_model = dict() + y_data = dict() + N_per_stancil = list() Spec_adjust_per_stancil = list() - weights = dict() + weights = dict() for I in Spec_returns: - - x_center = I['stancil_center'] - spec_adjust = I['spec_adjust'] - GFT_model[x_center] = (I['p_hat'][0:self.k.size], I['p_hat'][self.k.size:]) - Z_model[x_center] = Z = complex_represenation(I['p_hat'], self.k.size, Lpoints ) - - PSD_data, PSD_model = Z_to_power_gFT(Z, self.dk, I['x_size'], Lpoints ) - D_specs[x_center] = PSD_data * spec_adjust - D_specs_model[x_center] = PSD_model * spec_adjust * 0 # set to zero because this data should not be used anymore - - Pars[x_center] = I['inverse_stats'] - y_model[x_center] = I['y_model_grid'] - y_data[x_center] = I['y_data_grid'] - - weights[x_center] = I['weight'] - - N_per_stancil.append(I['x_size']) + x_center = I["stancil_center"] + spec_adjust = I["spec_adjust"] + GFT_model[x_center] = ( + I["p_hat"][0 : self.k.size], + I["p_hat"][self.k.size :], + ) + Z_model[x_center] = Z = complex_represenation( + I["p_hat"], self.k.size, Lpoints + ) + + PSD_data, PSD_model = Z_to_power_gFT(Z, self.dk, I["x_size"], Lpoints) + D_specs[x_center] = PSD_data * spec_adjust + D_specs_model[x_center] = ( + PSD_model * spec_adjust * 0 + ) # set to zero because this data should not be used anymore + + Pars[x_center] = I["inverse_stats"] + y_model[x_center] = I["y_model_grid"] + y_data[x_center] = I["y_data_grid"] + + weights[x_center] = I["weight"] + + N_per_stancil.append(I["x_size"]) Spec_adjust_per_stancil.append(spec_adjust) - print("# of x-coordinates" + str(len(Spec_returns)) ) + print("# of x-coordinates" + str(len(Spec_returns))) - self.N_per_stancil = N_per_stancil - chunk_positions = np.array(list(D_specs.keys())) - self.N_stancils = len(chunk_positions) # number of spectral realizatiobs + self.N_per_stancil = N_per_stancil + chunk_positions = np.array(list(D_specs.keys())) + self.N_stancils = len(chunk_positions) # number of spectral realizatiobs # repack data, create xarray # 1st LS spectal estimates - # G_power_data = dict() # for xi,I in D_specs.items(): # G_power_data[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_PSD_data') - G_power_data = make_xarray_from_dict(D_specs, 'gFT_PSD_data', ['k'], {'k': self.k} ) - G_power_data = xr.concat(G_power_data.values(), dim='x').T#.to_dataset() - + G_power_data = make_xarray_from_dict( + D_specs, "gFT_PSD_data", ["k"], {"k": self.k} + ) + G_power_data = xr.concat(G_power_data.values(), dim="x").T # .to_dataset() # G_power_model = dict() # for xi,I in D_specs_model.items(): # G_power_model[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_PSD_model') - G_power_model = make_xarray_from_dict(D_specs_model, 'gFT_PSD_model', ['k'], {'k': self.k} ) - G_power_model = xr.concat(G_power_model.values(), dim='x').T#.to_dataset() - - self.G = G_power_model - self.G.name = 'gFT_PSD_model' + G_power_model = make_xarray_from_dict( + D_specs_model, "gFT_PSD_model", ["k"], {"k": self.k} + ) + 
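        # concatenating the per-stancil 1D spectra along their "x" (stancil
        # center) coordinate builds the (k, x) spectrogram; .T puts wavenumber
        # first, matching G_power_data above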
G_power_model = xr.concat(G_power_model.values(), dim="x").T # .to_dataset() + + self.G = G_power_model + self.G.name = "gFT_PSD_model" - #2nd FFT(Y_model) + # 2nd FFT(Y_model) # G_model_Z =dict() # for xi,I in Z_model.items(): # # if I.size < Y_model_k_fft.size: # # I = np.insert(I, -1, I[-1]) # G_model_Z[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='Z_hat') - G_model_Z = make_xarray_from_dict(Z_model, 'Z_hat', ['k'], {'k': self.k} ) - G_model_Z = xr.concat(G_model_Z.values(), dim='x').T#.to_dataset() + G_model_Z = make_xarray_from_dict(Z_model, "Z_hat", ["k"], {"k": self.k}) + G_model_Z = xr.concat(G_model_Z.values(), dim="x").T # .to_dataset() # 3rd - GFT_model_coeff_A =dict() - GFT_model_coeff_B =dict() - for xi,I in GFT_model.items(): + GFT_model_coeff_A = dict() + GFT_model_coeff_B = dict() + for xi, I in GFT_model.items(): # if I.size < Y_model_k_fft.size: # I = np.insert(I, -1, I[-1]) - GFT_model_coeff_A[xi] = xr.DataArray(I[0], dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_cos_coeff') - GFT_model_coeff_B[xi] = xr.DataArray(I[1], dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_sin_coeff') - - GFT_model_coeff_A = xr.concat(GFT_model_coeff_A.values(), dim='x').T#.to_dataset() - GFT_model_coeff_B = xr.concat(GFT_model_coeff_B.values(), dim='x').T#.to_dataset() + GFT_model_coeff_A[xi] = xr.DataArray( + I[0], dims=["k"], coords={"k": self.k, "x": xi}, name="gFT_cos_coeff" + ) + GFT_model_coeff_B[xi] = xr.DataArray( + I[1], dims=["k"], coords={"k": self.k, "x": xi}, name="gFT_sin_coeff" + ) + + GFT_model_coeff_A = xr.concat( + GFT_model_coeff_A.values(), dim="x" + ).T # .to_dataset() + GFT_model_coeff_B = xr.concat( + GFT_model_coeff_B.values(), dim="x" + ).T # .to_dataset() # add weights to the data - weights_k = make_xarray_from_dict(weights , 'weight' , ['k'], {'k': self.k} ) - weights_k = xr.concat(weights_k.values() , dim='x').T#.to_dataset() + weights_k = make_xarray_from_dict(weights, "weight", ["k"], {"k": self.k}) + weights_k = xr.concat(weights_k.values(), dim="x").T # .to_dataset() # 4th: model in real space # y_model_eta =dict() @@ -479,88 +551,123 @@ def calc_gFT_apply(stancil, prior): # y_model_eta[xi] = xr.DataArray(y_model[xi], dims=['eta'], coords={'eta': eta, 'x': xi } , name="y_model") # y_data_eta[xi] = xr.DataArray(y_data[xi] , dims=['eta'], coords={'eta': eta, 'x': xi } , name="y_data") - eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters/2 - y_model_eta = make_xarray_from_dict(y_model, 'y_model', ['eta'], {'eta': eta} ) - y_data_eta = make_xarray_from_dict(y_data , 'y_data' , ['eta'], {'eta': eta} ) - - y_model_eta = xr.concat(y_model_eta.values(), dim='x').T#.to_dataset() - y_data_eta = xr.concat(y_data_eta.values() , dim='x').T#.to_dataset() + eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters / 2 + y_model_eta = make_xarray_from_dict(y_model, "y_model", ["eta"], {"eta": eta}) + y_data_eta = make_xarray_from_dict(y_data, "y_data", ["eta"], {"eta": eta}) + y_model_eta = xr.concat(y_model_eta.values(), dim="x").T # .to_dataset() + y_data_eta = xr.concat(y_data_eta.values(), dim="x").T # .to_dataset() # merge wavenumber datasets - self.GG = xr.merge([G_power_data, G_power_model, G_model_Z, GFT_model_coeff_A, GFT_model_coeff_B, weights_k]) - self.GG.attrs['ov'] = self.ov - self.GG.attrs['L'] = self.Lmeters - self.GG.attrs['Lpoints'] = self.Lpoints - self.GG.coords['N_per_stancil'] = ( ('x'), N_per_stancil) - self.GG.coords['spec_adjust'] = ( ('x'), Spec_adjust_per_stancil) - - 
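        # self.GG now bundles, on the common (k, x) grid: observed and model
        # PSDs, the complex coefficients Z_hat, the cos/sin coefficient pairs,
        # and the per-stancil inversion weights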
#self.GG.expand_dims(dim='eta') + self.GG = xr.merge( + [ + G_power_data, + G_power_model, + G_model_Z, + GFT_model_coeff_A, + GFT_model_coeff_B, + weights_k, + ] + ) + self.GG.attrs["ov"] = self.ov + self.GG.attrs["L"] = self.Lmeters + self.GG.attrs["Lpoints"] = self.Lpoints + self.GG.coords["N_per_stancil"] = (("x"), N_per_stancil) + self.GG.coords["spec_adjust"] = (("x"), Spec_adjust_per_stancil) + + # self.GG.expand_dims(dim='eta') # eta = np.arange(0, self.L + self.dx, self.dx) - self.L/2 # self.GG.coords['eta'] = ( ('eta'), eta ) # #self.GG['win'] = ( ('eta'), np.insert(self.win, -1, self.win[-1])) - # create dataframe with fitted parameters and derive y_model and errors # reduce to valid values - PP2= dict() + PP2 = dict() for k, I in Pars.items(): if I is not np.nan: - PP2[k] =I - #print(Pars) - #print(PP2) + PP2[k] = I + # print(Pars) + # print(PP2) keys = Pars[next(iter(PP2))].keys() - keys_DF = list(set(keys) - set(['model_error_k', 'model_error_x'])) - params_dataframe = pd.DataFrame(index =keys_DF) - model_error_k_cos =dict() - model_error_k_sin =dict() - model_error_x =dict() - for xi,I in Pars.items(): - + keys_DF = list(set(keys) - set(["model_error_k", "model_error_x"])) + params_dataframe = pd.DataFrame(index=keys_DF) + model_error_k_cos = dict() + model_error_k_sin = dict() + model_error_x = dict() + for xi, I in Pars.items(): if I is not np.nan: - params_dataframe[xi] = [I[ki] for ki in keys_DF] - model_error_k_cos[xi] = xr.DataArray(I['model_error_k'][0:self.k.size], dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_cos') - model_error_k_sin[xi] = xr.DataArray(I['model_error_k'][self.k.size:], dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_sin') - - sta, ste = xi- self.Lmeters/2, xi+self.Lmeters/2 - #x_mask= (sta <= X) & (X <= ste) - x_pos = (np.round( (X[(sta <= X) & (X <= ste)] - sta)/ self.dx ) ).astype('int') - x_err = np.copy(eta) *np.nan + model_error_k_cos[xi] = xr.DataArray( + I["model_error_k"][0 : self.k.size], + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_cos", + ) + model_error_k_sin[xi] = xr.DataArray( + I["model_error_k"][self.k.size :], + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_sin", + ) + + sta, ste = xi - self.Lmeters / 2, xi + self.Lmeters / 2 + # x_mask= (sta <= X) & (X <= ste) + x_pos = (np.round((X[(sta <= X) & (X <= ste)] - sta) / self.dx)).astype( + "int" + ) + x_err = np.copy(eta) * np.nan # check sizes and adjust if necessary. 
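                    # (likely an off-by-one at the stancil edges: the longer of
                    # x_pos / model_error_x is trimmed so the gridded error
                    # stays aligned with eta)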
- if x_pos.size > I['model_error_x'].size: - x_pos = x_pos[ 0 : I['model_error_x'].size ] - print('adjust x') - elif x_pos.size < I['model_error_x'].size: - I['model_error_x'] = I['model_error_x'][0:-1]# np.append(I['model_error_x'], I['model_error_x'][-1]) - print('adjust y') - - #print(x_pos.size , I['model_error_x'].size) - x_err[x_pos] = I['model_error_x'] - model_error_x[xi] = xr.DataArray(x_err, dims=['eta'], coords={'eta': eta, 'x': xi } , name='model_error_x') + if x_pos.size > I["model_error_x"].size: + x_pos = x_pos[0 : I["model_error_x"].size] + print("adjust x") + elif x_pos.size < I["model_error_x"].size: + I["model_error_x"] = I["model_error_x"][ + 0:-1 + ] # np.append(I['model_error_x'], I['model_error_x'][-1]) + print("adjust y") + + # print(x_pos.size , I['model_error_x'].size) + x_err[x_pos] = I["model_error_x"] + model_error_x[xi] = xr.DataArray( + x_err, + dims=["eta"], + coords={"eta": eta, "x": xi}, + name="model_error_x", + ) else: - model_error_k_cos[xi] = xr.DataArray(np.zeros(self.k.size)*np.nan, dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_cos') - model_error_k_sin[xi] = xr.DataArray(np.zeros(self.k.size)*np.nan, dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_sin') - - model_error_x[xi] = xr.DataArray(np.copy(eta) *np.nan, dims=['eta'], coords={'eta': eta, 'x': xi } , name='model_error_x') - - - self.GG['model_error_k_cos'] = xr.concat(model_error_k_cos.values(), dim='x').T - self.GG['model_error_k_sin'] = xr.concat(model_error_k_sin.values(), dim='x').T - - model_error_x = xr.concat(model_error_x.values(), dim='x').T - GG_x= xr.merge([y_model_eta, y_data_eta, model_error_x]) - #model_error_x + model_error_k_cos[xi] = xr.DataArray( + np.zeros(self.k.size) * np.nan, + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_cos", + ) + model_error_k_sin[xi] = xr.DataArray( + np.zeros(self.k.size) * np.nan, + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_sin", + ) + + model_error_x[xi] = xr.DataArray( + np.copy(eta) * np.nan, + dims=["eta"], + coords={"eta": eta, "x": xi}, + name="model_error_x", + ) + + self.GG["model_error_k_cos"] = xr.concat(model_error_k_cos.values(), dim="x").T + self.GG["model_error_k_sin"] = xr.concat(model_error_k_sin.values(), dim="x").T + + model_error_x = xr.concat(model_error_x.values(), dim="x").T + GG_x = xr.merge([y_model_eta, y_data_eta, model_error_x]) + # model_error_x return self.GG, GG_x, params_dataframe - - def calc_var(self): - Gmean = np.nanmean(self.G, 1) infmask = np.isinf(Gmean) @@ -569,57 +676,60 @@ def calc_var(self): # def parceval(self, add_attrs=True ): # return wavenumber_spectrogram.parceval(self, add_attrs= add_attrs ) - def parceval(self, add_attrs=True, weight_data=False ): + def parceval(self, add_attrs=True, weight_data=False): "test Parceval theorem" import copy + DATA = self.data L = self.Lmeters X = self.x # derive mean variances of stancils - #stancil_iter = create_chunk_boundaries_unit_lengths(L, self.xlims, ov= self.ov ) + # stancil_iter = create_chunk_boundaries_unit_lengths(L, self.xlims, ov= self.ov ) def get_stancil_var_apply(stancil): from scipy.signal import detrend + "returns the variance of yy for stancil" - x_mask= (stancil[0] < X) & (X <= stancil[-1]) + x_mask = (stancil[0] < X) & (X <= stancil[-1]) idata = DATA[x_mask] if len(idata) < 1: return stancil[1], np.nan, len(idata) idata = detrend(idata) # weight data - x_pos = (np.round( (X[x_mask] - stancil[0])/ 10.0 , 0) ).astype('int') + x_pos = (np.round((X[x_mask] - 
stancil[0]) / 10.0, 0)).astype("int") if weight_data: window = self.win[x_pos] - idata = idata * window * np.sqrt( np.var(idata) / np.var(( idata* window) ) ) + idata = ( + idata * window * np.sqrt(np.var(idata) / np.var((idata * window))) + ) return stancil[1], idata.var(), len(idata) - D_vars = list(map(get_stancil_var_apply, copy.copy(self.stancil_iter) )) + D_vars = list(map(get_stancil_var_apply, copy.copy(self.stancil_iter))) - stancil_vars, Nphotons =list(), 0 + stancil_vars, Nphotons = list(), 0 for I in D_vars: - stancil_vars.append(I[1] * I[2]) - Nphotons += I[2] + stancil_vars.append(I[1] * I[2]) + Nphotons += I[2] - stancil_weighted_variance = np.nansum(np.array(stancil_vars))/Nphotons + stancil_weighted_variance = np.nansum(np.array(stancil_vars)) / Nphotons - print('Parcevals Theorem:') - print('variance of timeseries: ', DATA.var()) - print('mean variance of stancils: ', stancil_weighted_variance ) - #print('variance of weighted timeseries: ',self.phi.var() ) - #self.calc_var(self) - print('variance of the optimzed windowed LS Spectrum: ', self.calc_var()) + print("Parcevals Theorem:") + print("variance of timeseries: ", DATA.var()) + print("mean variance of stancils: ", stancil_weighted_variance) + # print('variance of weighted timeseries: ',self.phi.var() ) + # self.calc_var(self) + print("variance of the optimzed windowed LS Spectrum: ", self.calc_var()) if add_attrs: - self.G.attrs['variance_unweighted_data'] = DATA.var() - self.G.attrs['mean_variance_stancils'] = np.nanmean(np.array(stancil_vars) ) - self.G.attrs['mean_variance_LS_pwelch_spectrum'] = self.calc_var() - - - def mean_spectral_error(self, mask=None, confidence = 0.95 ): - return wavenumber_spectrogram.mean_spectral_error(self, mask=mask, confidence= confidence ) - + self.G.attrs["variance_unweighted_data"] = DATA.var() + self.G.attrs["mean_variance_stancils"] = np.nanmean(np.array(stancil_vars)) + self.G.attrs["mean_variance_LS_pwelch_spectrum"] = self.calc_var() + def mean_spectral_error(self, mask=None, confidence=0.95): + return wavenumber_spectrogram.mean_spectral_error( + self, mask=mask, confidence=confidence + ) def complex_represenation(p_hat, M, N_x_full): @@ -635,13 +745,13 @@ def complex_represenation(p_hat, M, N_x_full): this returns a power spectral density with the same variance as the data without gaps. """ - Z = p_hat[0:M] - p_hat[M:] *1j - Z = Z * (N_x_full/2+1) # this + Z = p_hat[0:M] - p_hat[M:] * 1j + Z = Z * (N_x_full / 2 + 1) # this return Z -def Z_to_power_gFT(Z, dk, N_x, N_x_full): - """ compute the 1d Power spectral density of a field Z +def Z_to_power_gFT(Z, dk, N_x, N_x_full): + """compute the 1d Power spectral density of a field Z inputs: Z complex fourier coefficients, output of .complex_represenation method dk delta wavenumber asssuming Z is on regular grid @@ -654,13 +764,13 @@ def Z_to_power_gFT(Z, dk, N_x, N_x_full): prefer spec_complete """ - spec = 2.*(Z*Z.conj()).real + spec = 2.0 * (Z * Z.conj()).real - neven = True if (N_x_full%2) else False + neven = True if (N_x_full % 2) else False # the zeroth frequency should be counted only once - spec[0] = spec[0]/2. + spec[0] = spec[0] / 2.0 if neven: - spec[-1] = spec[-1]/2. 
+ spec[-1] = spec[-1] / 2.0 # spectral density respesenting the incomplete data ( [p_hat]^2 / dk) spec_incomplete = spec / dk / N_x / N_x_full @@ -669,8 +779,9 @@ def Z_to_power_gFT(Z, dk, N_x, N_x_full): return spec_incomplete, spec_complete -def power_from_model(p_hat, dk, M, N_x, N_x_full): - """ compute the 1d Power spectral density from the model coefficients in p_hat + +def power_from_model(p_hat, dk, M, N_x, N_x_full): + """compute the 1d Power spectral density from the model coefficients in p_hat p_hat is the model coefficient matrix M size of the model vector/2, size of k @@ -683,46 +794,44 @@ def power_from_model(p_hat, dk, M, N_x, N_x_full): """ Z = complex_represenation(p_hat, M, N_x_full) - spec, _ = Z_to_power_gFT(Z, dk, N_x, N_x_full) # use spec_incomplete + spec, _ = Z_to_power_gFT(Z, dk, N_x, N_x_full) # use spec_incomplete # spectral density respesenting the incomplete data return spec - class generalized_Fourier(object): def __init__(self, x, ydata, k): - """ non_dimensionalize (bool, default=True) if True, then the data and R_data_uncertainty is non-dimensionalized by the std of the data """ import numpy as np from numpy import linalg - self.x, self.ydata, self.k = x, ydata, k - self.M = self.k.size # number of wavenumbers - self.N = self.x.size# number of datapoints + self.M = self.k.size # number of wavenumbers + self.N = self.x.size # number of datapoints # if self.non_dimensionalize: # self.ydata_star = (self.ydata - self.ydata_mean)/self.ydata_std # else: - #self.ydata_star = self.ydata + # self.ydata_star = self.ydata if ydata is not None: - - self.ydata_var = self.ydata.var() - self.ydata_mean = self.ydata.mean() + self.ydata_var = self.ydata.var() + self.ydata_mean = self.ydata.mean() # test if the data is real, not nan and not inf - assert np.isrealobj(self.ydata), 'data is not real' - assert np.isfinite(self.ydata).all(), 'data is not finite' - assert np.isnan(self.ydata).all() == False, 'data is not nan' + assert np.isrealobj(self.ydata), "data is not real" + assert np.isfinite(self.ydata).all(), "data is not finite" + assert np.isnan(self.ydata).all() == False, "data is not nan" # data matrix - def get_H(self, xx = None): + def get_H(self, xx=None): xx = self.x if xx is None else xx - self.H = np.vstack([ np.cos(np.outer(xx, self.k)).T , np.sin(np.outer(xx, self.k)).T ] ).T - return self.H + self.H = np.vstack( + [np.cos(np.outer(xx, self.k)).T, np.sin(np.outer(xx, self.k)).T] + ).T + return self.H def define_problem(self, P_weight, R_data_uncertainty): """ @@ -730,22 +839,23 @@ def define_problem(self, P_weight, R_data_uncertainty): if P = 0, then the corresponding wavenumber is not penalized, not weighted if P != 0, then the corresponding wavenumber is penalized, i.e. it is put more weight on it. data_uncertainty (observed) uncertainy of the datain units of the data , can be vector of length N, or scaler - """ + """ - self.H = self.get_H() - #self.P = np.diag(1/penalties) # penalty 2M x 2M - #self.R = np.diag( data_uncertainty) #Noise Prior N x N - self.P_1d = np.concatenate([ P_weight , P_weight ]) # these are weights again .. - self.R_1d = R_data_uncertainty + self.H = self.get_H() + # self.P = np.diag(1/penalties) # penalty 2M x 2M + # self.R = np.diag( data_uncertainty) #Noise Prior N x N + self.P_1d = np.concatenate([P_weight, P_weight]) # these are weights again .. 
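        # stacking P_weight twice gives the cos and sin coefficient of each
        # wavenumber the same prior variance; solve() then evaluates the
        # regularized least-squares estimate
        #     p_hat = (H^T R^-1 H + P^-1)^-1 H^T R^-1 y
        # with R kept as a 1D diagonal so no N x N matrix is ever formed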
+ self.R_1d = R_data_uncertainty def solve(self): from numpy import linalg + inv = linalg.inv """ solves the linear inverse problem, return hessian and p_hat self.p_hat = is also non-dimensional """ - + # standard inversion # H = self.H # P = self.P @@ -764,10 +874,10 @@ def solve(self): R_1d = self.R_1d y = self.ydata - H_T_R_inv = H.T * (1/R_1d) - Hess = (H_T_R_inv @ H ) + np.diag(1/P_1d) - Hess_inv = inv(Hess) - p_hat = Hess_inv @ H_T_R_inv @ y + H_T_R_inv = H.T * (1 / R_1d) + Hess = (H_T_R_inv @ H) + np.diag(1 / P_1d) + Hess_inv = inv(Hess) + p_hat = Hess_inv @ H_T_R_inv @ y self.Hess, self.Hess_inv, self.p_hat = Hess, Hess_inv, p_hat del H_T_R_inv @@ -782,15 +892,15 @@ def solve(self): # if 'p_hat' not in self.__dict__: # raise ValueError('p_hat does not exist, please invert for model first') # return self.model_nondimensional() * self.ydata_std + self.ydata_mean - + def model(self): - " returns the model dimensional units" - if 'p_hat' not in self.__dict__: - raise ValueError('p_hat does not exist, please invert for model first') + "returns the model dimensional units" + if "p_hat" not in self.__dict__: + raise ValueError("p_hat does not exist, please invert for model first") return (self.p_hat * self.H).sum(1) def parceval(self, dk, Nx_full): - """ compute the 1d Power spectral density from the model coefficients in p_hat + """compute the 1d Power spectral density from the model coefficients in p_hat p_hat is the model coefficient matrix M size of the model vector/2, size of k @@ -805,78 +915,83 @@ def parceval(self, dk, Nx_full): p_hat = self.p_hat M = self.M Nx = self.N - - Z = complex_represenation(p_hat, M, Nx_full ) - spec_incomplete, spec_complete = Z_to_power_gFT(Z, dk, Nx, Nx_full) # use spec_incomplete + + Z = complex_represenation(p_hat, M, Nx_full) + spec_incomplete, spec_complete = Z_to_power_gFT( + Z, dk, Nx, Nx_full + ) # use spec_incomplete var_spec_incomplete = np.trapz(spec_incomplete, x=self.k) var_spec_complete = np.trapz(spec_complete, x=self.k) # calculate adjustment factor forspectral density - model_var =self.model().var() - spec_adjust = max( var_spec_incomplete / model_var, model_var / var_spec_incomplete ) + model_var = self.model().var() + spec_adjust = max( + var_spec_incomplete / model_var, model_var / var_spec_incomplete + ) pars = { - 'model_var': model_var, - 'var_spec_incomplete': var_spec_incomplete, - 'var_spec_complete': var_spec_complete, - 'spec_adjust': spec_adjust - } + "model_var": model_var, + "var_spec_incomplete": var_spec_incomplete, + "var_spec_complete": var_spec_complete, + "spec_adjust": spec_adjust, + } # spectral density respesenting the incomplete data return pars - - def get_stats(self, dk, Nx_full, print_flag=False): - #model_error_k = np.diag(self.Hess_inv) - #model_error_real = ((self.H**2) @ self.Hess_inv).sum(1) + def get_stats(self, dk, Nx_full, print_flag=False): + # model_error_k = np.diag(self.Hess_inv) + # model_error_real = ((self.H**2) @ self.Hess_inv).sum(1) residual = self.ydata - self.model() Lmeters = self.x[-1] - self.x[0] pars = { - 'data_var': self.ydata.var(), - 'model_var': self.model().var(), - 'residual_var' : residual.var(), - #'normalized_residual' : residual.var() /self.R_1d.mean(), - 'model_error_k' : np.diag(self.Hess_inv), - 'model_error_x' : ((self.H**2) @ self.Hess_inv).sum(1), - 'var_sum' : self.ydata.var() - self.model().var() -residual.var() + "data_var": self.ydata.var(), + "model_var": self.model().var(), + "residual_var": residual.var(), + #'normalized_residual' : residual.var() 
/self.R_1d.mean(), + "model_error_k": np.diag(self.Hess_inv), + "model_error_x": ((self.H**2) @ self.Hess_inv).sum(1), + "var_sum": self.ydata.var() - self.model().var() - residual.var(), } pars2 = self.parceval(dk, Nx_full) - for k,I in pars2.items(): + for k, I in pars2.items(): pars[k] = I if print_flag: - for ki in ['data_var', 'model_var', 'residual_var','var_sum', 'var_spec_incomplete', 'var_spec_complete', 'spec_adjust']: - print( ki.ljust(20) + str(pars[ki]) ) + for ki in [ + "data_var", + "model_var", + "residual_var", + "var_sum", + "var_spec_incomplete", + "var_spec_complete", + "spec_adjust", + ]: + print(ki.ljust(20) + str(pars[ki])) return pars - class get_prior_spec(object): def __init__(self, freq, data): - - - """ - - """ + """ """ import numpy as np import lmfit as LM - self.LM =LM + + self.LM = LM self.data = data self.freq = freq - if self.freq[0] ==0: - - self.freq_cut_flag= True + if self.freq[0] == 0: + self.freq_cut_flag = True self.freq, self.data = self.freq[1:], self.data[1:] else: - self.freq_cut_flag= False + self.freq_cut_flag = False def set_parameters(self, flim=None): - """ sets parameters fpr optimization setf.freq freq grid used for optimization @@ -888,95 +1003,101 @@ def set_parameters(self, flim=None): """ import numpy as np - params = self.LM.Parameters() - def get_peak_pos(y, smooth =30): + params = self.LM.Parameters() - yy =self.runningmean(y, smooth, tailcopy=False) + def get_peak_pos(y, smooth=30): + yy = self.runningmean(y, smooth, tailcopy=False) yy[np.isnan(yy)] = 0 return yy.argmax() if flim is not None: - iflim= abs(self.freq - flim).argmin() - f_max = self.freq[0:iflim][ get_peak_pos(abs(self.data[0:iflim]), 30) ] + iflim = abs(self.freq - flim).argmin() + f_max = self.freq[0:iflim][get_peak_pos(abs(self.data[0:iflim]), 30)] else: - f_max = self.freq[ get_peak_pos(abs(self.data), 30) ] + f_max = self.freq[get_peak_pos(abs(self.data), 30)] self.f_max = f_max # p_smothed = self.runningmean(np.abs(self.Z ), 20, tailcopy=True) # f_max = self.freq[p_smothed[~np.isnan(p_smothed)].argmax()] - params.add('f_max', f_max , min=f_max*0.2, max=f_max*1.5, vary=True) - params.add('amp', 0.05 , min=.0001, max=.1, vary=True) - params.add('gamma', 1 , min=1, max=3.3, vary=False) - params.add('alpha', 1 , min=0, max= 0.95 * np.pi /2, vary=True) + params.add("f_max", f_max, min=f_max * 0.2, max=f_max * 1.5, vary=True) + params.add("amp", 0.05, min=0.0001, max=0.1, vary=True) + params.add("gamma", 1, min=1, max=3.3, vary=False) + params.add("alpha", 1, min=0, max=0.95 * np.pi / 2, vary=True) self.params = params return params def model_func(self, f, params): - return self.non_dim_spec_model(f, params['f_max'].value, params['amp'].value, params['gamma'].value) + return self.non_dim_spec_model( + f, params["f_max"].value, params["amp"].value, params["gamma"].value + ) - def non_dim_spec_model(self, f, f_max, amp, gamma=1, angle_rad = 0): + def non_dim_spec_model(self, f, f_max, amp, gamma=1, angle_rad=0): import icesat2_tracks.local_modules.JONSWAP_gamma as spectal_models - U= 20 # results are incensitive to U + + U = 20 # results are incensitive to U f_true = f * np.cos(angle_rad) - model= spectal_models.JONSWAP_default_alt(f_true, f_max, 20 , gamma=gamma) - model = amp * model/np.nanmean(model) - model[np.isnan(model)] =0 + model = spectal_models.JONSWAP_default_alt(f_true, f_max, 20, gamma=gamma) + model = amp * model / np.nanmean(model) + model[np.isnan(model)] = 0 return model - def objective_func(self,params, data, model_func, freq, weight= None): - 
f_weight = np.linspace(1, .1, freq.size) + def objective_func(self, params, data, model_func, freq, weight=None): + f_weight = np.linspace(1, 0.1, freq.size) model = model_func(freq, params) - cost =( abs(data - model) * f_weight / data.std() )**2 - return cost.sum() + 4 *np.abs(self.f_max -params['f_max'])**2 / self.f_max - - + cost = (abs(data - model) * f_weight / data.std()) ** 2 + return cost.sum() + 4 * np.abs(self.f_max - params["f_max"]) ** 2 / self.f_max def test_ojective_func(self, model_func): return self.objective_func(self.params, self.data, model_func, self.freq) - - def optimize(self, fitting_args= None , method='dual_annealing', max_nfev=None): - - + def optimize(self, fitting_args=None, method="dual_annealing", max_nfev=None): if fitting_args is None: fitting_args = (self.data, self.model_func, self.freq) - #fit_kws= {'maxfun': 1e7} - fit_kws= {'maxfun': 1e5} + # fit_kws= {'maxfun': 1e7} + fit_kws = {"maxfun": 1e5} self.weight_func = fitting_args[1] - self.fitter = self.LM.minimize(self.objective_func, self.params, args=fitting_args, method=method, max_nfev=max_nfev, **fit_kws) + self.fitter = self.LM.minimize( + self.objective_func, + self.params, + args=fitting_args, + method=method, + max_nfev=max_nfev, + **fit_kws, + ) return self.fitter def plot_data(self): import matplotlib.pyplot as plt - plt.plot(self.freq, self.data, 'k') + + plt.plot(self.freq, self.data, "k") def plot_model(self, pars): import matplotlib.pyplot as plt - plt.plot(self.freq, self.model_func(self.freq, pars), 'b--' ) + + plt.plot(self.freq, self.model_func(self.freq, pars), "b--") def runningmean(self, var, m, tailcopy=False): - m=int(m) - s =var.shape - if s[0] <= 2*m: - print('0 Dimension is smaller then averaging length') + m = int(m) + s = var.shape + if s[0] <= 2 * m: + print("0 Dimension is smaller then averaging length") return - rr=np.asarray(var)*np.nan - #print(type(rr)) - var_range=np.arange(m,int(s[0])-m-1,1) - for i in var_range[np.isfinite(var[m:int(s[0])-m-1])]: - #rm.append(var[i-m:i+m].mean()) - rr[int(i)]=np.nanmean(var[i-m:i+m]) + rr = np.asarray(var) * np.nan + # print(type(rr)) + var_range = np.arange(m, int(s[0]) - m - 1, 1) + for i in var_range[np.isfinite(var[m : int(s[0]) - m - 1])]: + # rm.append(var[i-m:i+m].mean()) + rr[int(i)] = np.nanmean(var[i - m : i + m]) if tailcopy: - rr[0:m]=rr[m+1] - rr[-m-1:-1]=rr[-m-2] + rr[0:m] = rr[m + 1] + rr[-m - 1 : -1] = rr[-m - 2] return rr - - def create_weight(self,freq=None, plot_flag=True, flim= None, max_nfev=None): + def create_weight(self, freq=None, plot_flag=True, flim=None, max_nfev=None): """ this returns a weight function that can be used for the Least square fitting. 
""" @@ -985,7 +1106,7 @@ def create_weight(self,freq=None, plot_flag=True, flim= None, max_nfev=None): ff = self.freq else: ff = freq - if 'params' not in self.__dict__: + if "params" not in self.__dict__: self.set_parameters(flim=flim) self.optimize(max_nfev=max_nfev) @@ -995,7 +1116,6 @@ def create_weight(self,freq=None, plot_flag=True, flim= None, max_nfev=None): self.plot_data() self.plot_model(self.fitter.params) - result = self.model_func(ff, self.fitter.params) if self.freq_cut_flag and freq is None: result = np.insert(result, 0, 0) diff --git a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py index 6d6f5754..94deba31 100644 --- a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py +++ b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py @@ -1,6 +1,4 @@ -# %% import os, sys -#execfile(os.environ['PYTHONSTARTUP']) """ This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. @@ -11,502 +9,511 @@ mconfig, xr, color_schemes, - font_for_pres, plt, np, - font_for_print + font_for_print, ) -#%matplotlib inline - -import icesat2_tracks.ICEsat2_SI_tools.convert_GPS_time as cGPS -import h5py import icesat2_tracks.ICEsat2_SI_tools.io as io import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec import time -import imp -import copy -import icesat2_tracks.ICEsat2_SI_tools.spicke_remover -import datetime from matplotlib.gridspec import GridSpec import icesat2_tracks.ICEsat2_SI_tools.generalized_FT as gFT from scipy.ndimage.measurements import label import icesat2_tracks.local_modules.m_tools_ph3 as MT from icesat2_tracks.local_modules import m_general_ph3 as M -#import s3fs -# %% -track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment -#track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190208152826_06440210_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190213133330_07190212_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190207002436_06190212_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190206022433_06050212_004_01', 'SH_batch02', False - -#track_name, batch_key, test_flag = 'SH_20190101_00570212', 'SH_batch04', True -#track_name, batch_key, test_flag = 'SH_20190219_08070210', 'SH_batchminimal', True - +track_name, batch_key, test_flag = io.init_from_input( + sys.argv +) # loads standard experiment +hemis, batch = batch_key.split("_") -#track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False -#print(track_name, batch_key, test_flag) -hemis, batch = batch_key.split('_') - -load_path = mconfig['paths']['work'] +batch_key+'/B02_spectra/' -load_file = load_path + 'B02_' + track_name #+ '.nc' -plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/' +load_path = mconfig["paths"]["work"] + batch_key + "/B02_spectra/" +load_file = load_path + "B02_" + track_name # + '.nc' +plot_path = ( + mconfig["paths"]["plot"] + "/" + hemis + "/" + batch_key + "/" + track_name + "/" +) MT.mkdirs_r(plot_path) -Gk = xr.open_dataset(load_file+'_gFT_k.nc') -Gx = xr.open_dataset(load_file+'_gFT_x.nc') +Gk = xr.open_dataset(load_file + "_gFT_k.nc") +Gx = 
xr.open_dataset(load_file + "_gFT_x.nc") -Gfft = xr.open_dataset(load_file+'_FFT.nc') -# print(Gk) -# print(Gx) +Gfft = xr.open_dataset(load_file + "_FFT.nc") time.sleep(2) -# %% -# for ibeam in Gk.beam: -# print(Gk.sel(beam=ibeam).gFT_PSD_data.data) - -# %% -all_beams = mconfig['beams']['all_beams'] -high_beams = mconfig['beams']['high_beams'] -low_beams = mconfig['beams']['low_beams'] -#Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data -#Gd = io.load_pandas_table_dict(track_name + '_B01_binned' , load_path) # +all_beams = mconfig["beams"]["all_beams"] +high_beams = mconfig["beams"]["high_beams"] +low_beams = mconfig["beams"]["low_beams"] color_schemes.colormaps2(21) -# %% check paths (again) - -col_dict= color_schemes.rels -F = M.figure_axis_xy(9, 3, view_scale =0.5) +col_dict = color_schemes.rels +F = M.figure_axis_xy(9, 3, view_scale=0.5) -plt.subplot(1,3, 1) -plt.title(track_name , loc ='left') +plt.subplot(1, 3, 1) +plt.title(track_name, loc="left") for k in all_beams: I = Gk.sel(beam=k) I2 = Gx.sel(beam=k) - plt.plot(I['lon'], I['lat'], '.', c= col_dict[k], markersize = 0.7, linewidth =0.3) - plt.plot(I2['lon'], I2['lat'], '|', c= col_dict[k], markersize = 0.7 ) + plt.plot(I["lon"], I["lat"], ".", c=col_dict[k], markersize=0.7, linewidth=0.3) + plt.plot(I2["lon"], I2["lat"], "|", c=col_dict[k], markersize=0.7) -plt.xlabel('lon') -plt.ylabel('lat') +plt.xlabel("lon") +plt.ylabel("lat") -plt.subplot(1,3, 2) +plt.subplot(1, 3, 2) -xscale= 1e3 +xscale = 1e3 for k in all_beams: I = Gk.sel(beam=k) - plt.plot( I['x_coord']/xscale , I['y_coord']/xscale, '.' , c= col_dict[k] , linewidth = 0.8, markersize = 0.8 ) - # I2 = G_gFT[k] - # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) + plt.plot( + I["x_coord"] / xscale, + I["y_coord"] / xscale, + ".", + c=col_dict[k], + linewidth=0.8, + markersize=0.8, + ) -plt.xlabel('x_coord (km)') -plt.ylabel('y_coord (km)') +plt.xlabel("x_coord (km)") +plt.ylabel("y_coord (km)") -plt.subplot(1,3, 3) +plt.subplot(1, 3, 3) -xscale= 1e3 +xscale = 1e3 for k in all_beams: I = Gk.sel(beam=k) - plt.plot( I['x_coord']/xscale , (I['y_coord']-I['y_coord'][0]), '.' 
, c= col_dict[k], linewidth = 0.8, markersize = 0.8)
-    # I2 = G_gFT[k]
-    # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7)
+    plt.plot(
+        I["x_coord"] / xscale,
+        (I["y_coord"] - I["y_coord"][0]),
+        ".",
+        c=col_dict[k],
+        linewidth=0.8,
+        markersize=0.8,
+    )
 
-plt.xlabel('x_coord (km)')
-plt.ylabel('y_coord deviation (m)')
+plt.xlabel("x_coord (km)")
+plt.ylabel("y_coord deviation (m)")
 
-F.save_light(path=plot_path, name = 'B03_specs_coord_check')
+F.save_light(path=plot_path, name="B03_specs_coord_check")
 
-# %%
 def dict_weighted_mean(Gdict, weight_key):
     """
-    returns the weighted meean of a dict of xarray, data_arrays
+    returns the weighted mean of a dict of xarray DataArrays
     weight_key must be in the xr.DataArrays
     """
-    #Gdict = G_rar_fft
-    #weight_key='N_per_stancil'
-    akey = list( Gdict.keys() )[0]
+    akey = list(Gdict.keys())[0]
     GSUM = Gdict[akey].copy()
-    GSUM.data = np.zeros(GSUM.shape)
+    GSUM.data = np.zeros(GSUM.shape)
     N_per_stancil = GSUM.N_per_stancil * 0
-    N_photons = np.zeros(GSUM.N_per_stancil.size)
+    N_photons = np.zeros(GSUM.N_per_stancil.size)
 
-    counter= 0
-    for k,I in Gdict.items():
-        #print(k)
-        I =I.squeeze()
-        print(len(I.x) )
-        if len(I.x) !=0:
-            GSUM += I.where( ~np.isnan(I), 0) * I[weight_key] #.sel(x=GSUM.x)
-            N_per_stancil += I[weight_key]
-            if 'N_photons' in GSUM.coords:
-                N_photons += I['N_photons']
-        counter+=1
+    counter = 0
+    for I in Gdict.values():  # .values(), not .items(): each I must be a DataArray
+        I = I.squeeze()
+        if len(I.x) != 0:
+            GSUM += I.where(~np.isnan(I), 0) * I[weight_key]  # .sel(x=GSUM.x)
+            N_per_stancil += I[weight_key]
+            if "N_photons" in GSUM.coords:
+                N_photons += I["N_photons"]
+        counter += 1
 
-    GSUM = GSUM / N_per_stancil
+    GSUM = GSUM / N_per_stancil
 
-    if 'N_photons' in GSUM.coords:
-        GSUM.coords['N_photons'] = (('x', 'beam'), np.expand_dims(N_photons, 1) )
+    if "N_photons" in GSUM.coords:
+        GSUM.coords["N_photons"] = (("x", "beam"), np.expand_dims(N_photons, 1))
 
-    GSUM['beam'] = ['weighted_mean']
-    GSUM.name='power_spec'
+    GSUM["beam"] = ["weighted_mean"]
+    GSUM.name = "power_spec"
 
     return GSUM
 
-G_gFT_wmean = (Gk['gFT_PSD_data'].where( ~np.isnan(Gk['gFT_PSD_data']), 0) * Gk['N_per_stancil']).sum('beam')/ Gk['N_per_stancil'].sum('beam')
-G_gFT_wmean['N_per_stancil'] = Gk['N_per_stancil'].sum('beam')
-
-G_fft_wmean = (Gfft.where( ~np.isnan(Gfft), 0) * Gfft['N_per_stancil']).sum('beam')/ Gfft['N_per_stancil'].sum('beam')
-G_fft_wmean['N_per_stancil'] = Gfft['N_per_stancil'].sum('beam')
+G_gFT_wmean = (
+    Gk["gFT_PSD_data"].where(~np.isnan(Gk["gFT_PSD_data"]), 0) * Gk["N_per_stancil"]
+).sum("beam") / Gk["N_per_stancil"].sum("beam")
+G_gFT_wmean["N_per_stancil"] = Gk["N_per_stancil"].sum("beam")
+G_fft_wmean = (Gfft.where(~np.isnan(Gfft), 0) * Gfft["N_per_stancil"]).sum(
+    "beam"
+) / Gfft["N_per_stancil"].sum("beam")
+G_fft_wmean["N_per_stancil"] = Gfft["N_per_stancil"].sum("beam")
 
-# %% plot
-def plot_wavenumber_spectrogram(ax, Gi, clev, title= None, plot_photon_density=True ):
-    if Gi.k[0] ==0:
-        Gi= Gi.sel(k=Gi.k[1:])
-    x_lambda= 2 * np.pi/Gi.k
-    plt.pcolormesh(Gi.x/1e3, x_lambda , Gi, cmap=plt.cm.ocean_r , vmin = clev[0], vmax = clev[-1])
+def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=True):
+    if Gi.k[0] == 0:
+        Gi = Gi.sel(k=Gi.k[1:])
+    x_lambda = 2 * np.pi / Gi.k
+    plt.pcolormesh(
+        Gi.x / 1e3, x_lambda, Gi, cmap=plt.cm.ocean_r, vmin=clev[0], vmax=clev[-1]
+    )
 
-    ax.set_yscale('log')
-    # plt.colorbar(orientation='vertical', pad=0.06, label='Spectral Power (m^2/m)')
+    ax.set_yscale("log")
     if plot_photon_density:
+        plt.plot(
+            Gi.x / 1e3,
+            x_lambda[-1] + (Gi.N_per_stancil / 
-# %% plot
-def plot_wavenumber_spectrogram(ax, Gi, clev, title= None, plot_photon_density=True ):
-    if Gi.k[0] ==0:
-        Gi= Gi.sel(k=Gi.k[1:])
-    x_lambda= 2 * np.pi/Gi.k
-    plt.pcolormesh(Gi.x/1e3, x_lambda , Gi, cmap=plt.cm.ocean_r , vmin = clev[0], vmax = clev[-1])
+def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=True):
+    if Gi.k[0] == 0:
+        Gi = Gi.sel(k=Gi.k[1:])
+    x_lambda = 2 * np.pi / Gi.k
+    plt.pcolormesh(
+        Gi.x / 1e3, x_lambda, Gi, cmap=plt.cm.ocean_r, vmin=clev[0], vmax=clev[-1]
+    )

-    ax.set_yscale('log')
-    # plt.colorbar(orientation='vertical', pad=0.06, label='Spectral Power (m^2/m)')
+    ax.set_yscale("log")

     if plot_photon_density:
-        plt.plot(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10 , c='black', linewidth= 0.8, label='NAN-density' )
-        plt.fill_between(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10, 0, color='gray', alpha = 0.3)
-        ax.axhline(30, color='black', linewidth=0.3)
-
-    #plt.xlabel('Distance from the Ice Edge (km)')
+        plt.plot(
+            Gi.x / 1e3,
+            x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10,
+            c="black",
+            linewidth=0.8,
+            label="NAN-density",
+        )
+        plt.fill_between(
+            Gi.x / 1e3,
+            x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10,
+            0,
+            color="gray",
+            alpha=0.3,
+        )
+        ax.axhline(30, color="black", linewidth=0.3)

     plt.ylim(x_lambda[-1], x_lambda[0])
-    plt.title(title, loc='left')
+    plt.title(title, loc="left")


-#Gplot = G.rolling(x=5, min_periods= 1, center=True).mean()
-#Gmean = G_gFT_wmean.rolling(x=2, min_periods= 1, center=True).mean()
 Gmean = G_gFT_wmean.rolling(k=5, center=True).mean()
-#Gmean = Gmean.where(~np.isnan(Gmean), 0)
+
 try:
-    k_max_range = Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1.25
-except:
-    k_max_range = Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1.25
+    k_max_range = (
+        Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 0.75,
+        Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1,
+        Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1.25,
+    )
+except Exception:
+    k_max_range = (
+        Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 0.75,
+        Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1,
+        Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1.25,
+    )
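Note: the try/except above repeats the same argmax expression six times, differing only in how many leading stancils enter the mean. A hypothetical helper (a refactor sketch, not part of this patch) that returns the same peak wavenumber with a ±25% band around it:

    def peak_k_band(Gmean, nx, width=0.25):
        # peak wavenumber of the along-track mean over the first nx stancils
        k_peak = Gmean.k[Gmean.isel(x=slice(0, nx)).mean("x").argmax().data].data
        return k_peak * (1 - width), k_peak, k_peak * (1 + width)

    # k_max_range = peak_k_band(Gmean, 5)   # with a fallback to peak_k_band(Gmean, 20)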
-# %%
 font_for_print()
-F = M.figure_axis_xy(6.5, 5.6, container= True, view_scale =1)
+F = M.figure_axis_xy(6.5, 5.6, container=True, view_scale=1)
 Lmeters = Gk.L.data[0]

-plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98)
-gs = GridSpec(3,3, wspace=0.2, hspace=.5)#figure=fig,
-#clev=np.arange(0, 6, 0.1)*3
-
-#%matplotlib inline
+plt.suptitle("gFT Slope Spectrograms\n" + track_name, y=0.98)
+gs = GridSpec(3, 3, wspace=0.2, hspace=0.5)

-# define mean first for colorbar
-Gplot = G_gFT_wmean.squeeze().rolling(k=10, min_periods= 1, center=True).median().rolling(x=3, min_periods= 1, center=True).median()
+Gplot = (
+    G_gFT_wmean.squeeze()
+    .rolling(k=10, min_periods=1, center=True)
+    .median()
+    .rolling(x=3, min_periods=1, center=True)
+    .median()
+)
 dd = 10 * np.log10(Gplot)
-dd= dd.where(~np.isinf(dd), np.nan )
-clev_log = M.clevels( [dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31)* 1
-
-#clev = M.clevels( [Gmean.quantile(0.6).data * 1e4, Gmean.quantile(0.99).data * 1e4], 31)/ 1e4
+dd = dd.where(~np.isinf(dd), np.nan)  # mask -inf from log10(0)
+clev_log = M.clevels([dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31) * 1

-xlims= Gmean.x[0]/1e3, Gmean.x[-1]/1e3
+xlims = Gmean.x[0] / 1e3, Gmean.x[-1] / 1e3

-k =high_beams[0]
-for pos, k, pflag in zip([gs[0, 0],gs[0, 1],gs[0, 2] ], high_beams, [True, False, False] ):
+k = high_beams[0]
+for pos, k, pflag in zip(
+    [gs[0, 0], gs[0, 1], gs[0, 2]], high_beams, [True, False, False]
+):
     ax0 = F.fig.add_subplot(pos)
-    Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean()
-    #Gplot.mean('x').plot()
+    Gplot = Gk.sel(beam=k).gFT_PSD_data.squeeze()

     dd2 = 10 * np.log10(Gplot)
-    dd2= dd2.where(~np.isinf(dd2), np.nan )
-    plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k + ' unsmoothed', plot_photon_density=True )
+    dd2 = dd2.where(~np.isinf(dd2), np.nan)
+    plot_wavenumber_spectrogram(
+        ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True
+    )
     plt.xlim(xlims)
-    # if pflag:
-    plt.ylabel('Wave length\n(meters)')
+    plt.ylabel("Wavelength\n(meters)")
     plt.legend()

-for pos, k, pflag in zip([gs[1, 0],gs[1, 1],gs[1, 2] ], low_beams, [True, False, False] ):
+for pos, k, pflag in zip(
+    [gs[1, 0], gs[1, 1], gs[1, 2]], low_beams, [True, False, False]
+):
     ax0 = F.fig.add_subplot(pos)
-    Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean()
-    #Gplot.mean('x').plot()
+    Gplot = Gk.sel(beam=k).gFT_PSD_data.squeeze()

     dd2 = 10 * np.log10(Gplot)
-    dd2= dd2.where(~np.isinf(dd2), np.nan )
-    plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k+ ' unsmoothed', plot_photon_density=True )
+    dd2 = dd2.where(~np.isinf(dd2), np.nan)
+    plot_wavenumber_spectrogram(
+        ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True
+    )
     plt.xlim(xlims)
-    # if pflag:
-    plt.ylabel('Wave length\n(meters)')
+    plt.ylabel("Wavelength\n(meters)")
     plt.legend()

 ax0 = F.fig.add_subplot(gs[2, 0])
-plot_wavenumber_spectrogram(ax0, dd, clev_log , title ='smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$', plot_photon_density= True)
+plot_wavenumber_spectrogram(
+    ax0,
+    dd,
+    clev_log,
+    title="smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$",
+    plot_photon_density=True,
+)
 plt.xlim(xlims)

-# plt.plot(Gplot.x/1e3, 10* nan_list +20 , c='black', label='NAN-density' )
-# ax0.axhline(30, color='black', linewidth=0.5)
-
-ax0.axhline(2* np.pi/k_max_range[0], color='red', linestyle= '--', linewidth= 0.5)
-ax0.axhline(2* np.pi/k_max_range[1], color='red', linestyle= '-', linewidth= 0.5)
-ax0.axhline(2* np.pi/k_max_range[2], color='red', linestyle= '--', linewidth= 0.5)
+ax0.axhline(2 * np.pi / k_max_range[0], color="red", linestyle="--", linewidth=0.5)
+ax0.axhline(2 * np.pi / k_max_range[1], color="red", linestyle="-", linewidth=0.5)
+ax0.axhline(2 * np.pi / k_max_range[2], color="red", linestyle="--", linewidth=0.5)

 if pflag:
-    plt.ylabel('Wave length\n(meters)')
+    plt.ylabel("Wavelength\n(meters)")
     plt.legend()

 pos = gs[2, 1]
 ax0 = F.fig.add_subplot(pos)
-plt.title('Photons density ($m^{-1}$)', loc='left')
+plt.title("Photon density ($m^{-1}$)", loc="left")

 for k in all_beams:
-    I = Gk.sel(beam = k)['gFT_PSD_data']
-    plt.plot(Gplot.x/1e3, I.N_photons/I.L.data, label=k, linewidth=0.8)
-plt.plot(Gplot.x/1e3, G_gFT_wmean.N_per_stancil/3/I.L.data , c='black', label='ave Photons' , linewidth=0.8)
+    I = Gk.sel(beam=k)["gFT_PSD_data"]
+    plt.plot(Gplot.x / 1e3, I.N_photons / I.L.data, label=k, linewidth=0.8)
+plt.plot(
+    Gplot.x / 1e3,
+    G_gFT_wmean.N_per_stancil / 3 / I.L.data,
+    c="black",
+    label="ave Photons",
+    linewidth=0.8,
+)
 plt.xlim(xlims)
-plt.xlabel('Distance from the Ice Edge (km)')
+plt.xlabel("Distance from the Ice Edge (km)")

 pos = gs[2, 2]
 ax0 = F.fig.add_subplot(pos)
-ax0.set_yscale('log')
+ax0.set_yscale("log")

-plt.title('Peak Spectal Power', loc='left')
+plt.title("Peak Spectral Power", loc="left")

 x0 = Gk.x[0].data
 for k in all_beams:
-    I = Gk.sel(beam = k)['gFT_PSD_data']
-    plt.scatter(I.x.data/1e3, I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k').data , s=0.5, marker='.', color='red', alpha= 0.3)
-
-    I= Gfft.sel(beam = k)#.to_array()
-    #I= I[:,I.N_per_stancil >= I.N_per_stancil.max().data*0.9]
-    plt.scatter( (x0 +I.x.data)/1e3, I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , s=0.5, marker='.', c='blue', alpha= 0.3)
+    I = Gk.sel(beam=k)["gFT_PSD_data"]
+    plt.scatter(
+        I.x.data / 1e3,
+        I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k").data,
+        s=0.5,
+        marker=".",
+        color="red",
+        alpha=0.3,
+    )
+    I = Gfft.sel(beam=k)
+    plt.scatter(
+        (x0 + I.x.data) / 1e3,
+        I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"),
+        s=0.5,
+        marker=".",
+        c="blue",
+        alpha=0.3,
+    )

-Gplot= G_fft_wmean.squeeze()
-Gplot = Gplot.power_spec[:,Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data*0.9]
-plt.plot( (x0 + Gplot.x)/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.', markersize=1.5 , c='blue', label= 'FFT')
-
-Gplot= G_gFT_wmean.squeeze()
-plt.plot( Gplot.x/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.' , markersize=1.5, c='red', label= 'gFT')
+
+Gplot = G_fft_wmean.squeeze()
+Gplot = Gplot.power_spec[:, Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data * 0.9]
+plt.plot(
+    (x0 + Gplot.x) / 1e3,
+    Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"),
+    ".",
+    markersize=1.5,
+    c="blue",
+    label="FFT",
+)
+Gplot = G_gFT_wmean.squeeze()
+plt.plot(
+    Gplot.x / 1e3,
+    Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"),
+    ".",
+    markersize=1.5,
+    c="red",
+    label="gFT",
+)

-plt.ylabel('1e-3 $(m)^2~m$')
-plt.legend()
-#plt.ylim(Gplot.min()*1.4, Gplot.max()*1.4 )
-#plt.xlim(xlims)
+plt.ylabel("1e-3 $(m)^2~m$")
+plt.legend()

-F.save_light(path=plot_path, name = 'B03_specs_L'+str(Lmeters))
+F.save_light(path=plot_path, name="B03_specs_L" + str(Lmeters))

-# %%
-Gk.sel(beam = k).gFT_PSD_data.plot()
+Gk.sel(beam=k).gFT_PSD_data.plot()  # quick-look plot of the last-iterated beam

-# %% define simple routines
-def plot_model_eta(D, ax, offset = 0, xscale= 1e3 , **kargs ):
-    eta = D.eta + D.x
-    y_data = D.y_model+offset
-    plt.plot(eta/xscale,y_data , **kargs)
-
-    ax.axvline(eta[0].data/xscale , linewidth=2, color=kargs['color'], alpha=0.5)
-    ax.axvline(eta[-1].data/xscale, linewidth=2, color=kargs['color'], alpha=0.5)
+def plot_model_eta(D, ax, offset=0, xscale=1e3, **kargs):
+    eta = D.eta + D.x
+    y_data = D.y_model + offset
+    plt.plot(eta / xscale, y_data, **kargs)
+
+    ax.axvline(eta[0].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5)
+    ax.axvline(eta[-1].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5)

 def add_info(D, Dk, ylims):
-    eta = D.eta + D.x
-    N_per_stancil, ksize = Dk.N_per_stancil.data , Dk.k.size
-    plt.text(eta[0].data, ylims[-1], ' N='+numtostr(N_per_stancil) + ' N/2M= '+ fltostr(N_per_stancil/2/ksize, 1) )
+    eta = D.eta + D.x
+    N_per_stancil, ksize = Dk.N_per_stancil.data, Dk.k.size
+    plt.text(
+        eta[0].data,
+        ylims[-1],
+        " N="
+        + numtostr(N_per_stancil)
+        + " N/2M= "
+        + fltostr(N_per_stancil / 2 / ksize, 1),
+    )

-def plot_data_eta(D, offset = 0,xscale= 1e3 , **kargs ):
-    eta_1 = D.eta + D.x
-    y_data = D.y_model +offset
-    plt.plot(eta_1/xscale,y_data , **kargs)
+def plot_data_eta(D, offset=0, xscale=1e3, **kargs):
+    eta_1 = D.eta + D.x
+    y_data = D.y_model + offset
+    plt.plot(eta_1 / xscale, y_data, **kargs)
     return eta_1
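Note: every curve in the "Peak Spectral Power" panel reduces a spectrum to the energy in the peak band, E = integral of S(k) dk over k_max_range, via the same xarray idiom. A one-line helper would make that explicit (hypothetical name, a sketch rather than part of this patch):

    def band_energy(S, k_band):
        # trapezoidal integral of spectral power over the wavenumber band
        return S.sel(k=slice(k_band[0], k_band[-1])).integrate("k")

    # e.g. band_energy(G_gFT_wmean.squeeze(), k_max_range)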
-# %% phase examples
-### overlapping views
-#for i in np.arange(0,29,2):
-#    i = 4
-#    c1= 'blue'
-#    c2= 'red'
-#
-#    Gx_1 = Gx.isel(x= i).sel(beam = k)
-#    Gx_2 = Gx.isel(x= i+1).sel(beam = k)
-#
-#    Gk_1 = Gk.isel(x= i).sel(beam = k)
-#    Gk_2 = Gk.isel(x= i+1).sel(beam = k)
-#
-#    fltostr = MT.float_to_str
-#    numtostr = MT.num_to_str
-#
-#    #if k%2 ==0:
-#    font_for_print()
-#    F = M.figure_axis_xy(9, 5, container =True, view_scale= 0.8)
-#
-#    plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98)
-#    gs = GridSpec(3,4, wspace=0.2, hspace=.5)#figure=fig,
-#
-#    ax0 = F.fig.add_subplot(gs[0, :])
-#
-#
-#
-#    plot_model_eta(Gx_1, ax0, linestyle='-', color=c1, linewidth=0.4, alpha=1, zorder=12 )
-#    plot_model_eta(Gx_2, ax0, linestyle='-', color=c2, linewidth=0.4, alpha=1, zorder=12 )
-#
-#    ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3
-#
-#    add_info(Gx_1, Gk_1 , ylims )
-#    add_info(Gx_2, Gk_1 , ylims )
-#
-#    # oringial data
-#
-#    eta_1= plot_data_eta(Gx_1 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11)
-#    eta_2= plot_data_eta(Gx_2 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11)
-#
-#    dx = eta_1.diff('eta').mean()
-#    plt.xlim(eta_1[0].data - 40 * dx, eta_2[-1].data + 40 * dx )
-#    plt.ylim(ylims[0], ylims[-1])
-#

-# %% Single views
-
-def plot_data_eta(D, offset = 0 , **kargs ):
-    eta_1 = D.eta# + D.x
-    y_data = D.y_model +offset
-    plt.plot(eta_1,y_data , **kargs)
+def plot_data_eta(D, offset=0, **kargs):
+    eta_1 = D.eta  # + D.x
+    y_data = D.y_model + offset
+    plt.plot(eta_1, y_data, **kargs)
     return eta_1

-def plot_model_eta(D, ax, offset = 0, **kargs ):
-    eta = D.eta #+ D.x
-    y_data = D.y_model+offset
-    plt.plot(eta ,y_data , **kargs)
-    ax.axvline(eta[0].data, linewidth=0.1, color=kargs['color'], alpha=0.5)
-    ax.axvline(eta[-1].data, linewidth=0.1, color=kargs['color'], alpha=0.5)
+def plot_model_eta(D, ax, offset=0, **kargs):
+    eta = D.eta  # + D.x
+    y_data = D.y_model + offset
+    plt.plot(eta, y_data, **kargs)
+
+    ax.axvline(eta[0].data, linewidth=0.1, color=kargs["color"], alpha=0.5)
+    ax.axvline(eta[-1].data, linewidth=0.1, color=kargs["color"], alpha=0.5)

-if ('y_data' in Gx.sel(beam = 'gt3r').keys()):
-    print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) )
+if "y_data" in Gx.sel(beam="gt3r").keys():
+    print("y_data is present")
 else:
-    print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) )
-    MT.json_save('B03_fail', plot_path, {'reason':'no y_data'})
-    print('failed, exit')
+    print("y_data is missing")
+    MT.json_save("B03_fail", plot_path, {"reason": "no y_data"})
+    print("failed, exit")
     exit()

-# %%
 fltostr = MT.float_to_str
 numtostr = MT.num_to_str

 font_for_print()
+MT.mkdirs_r(plot_path + "B03_spectra/")

-#for i in x_pos_sel[::2]:
-#i =x_pos_sel[20]
-MT.mkdirs_r(plot_path+'B03_spectra/')
-
-x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data.data)]
-x_pos_max = Gk.mean('beam').mean('k').gFT_PSD_data[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data)].argmax().data
-xpp = x_pos_sel[ [int(i) for i in np.round(np.linspace(0, x_pos_sel.size-1, 4))]]
+x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data.data)]
+x_pos_max = (
+    Gk.mean("beam")
+    .mean("k")
+    .gFT_PSD_data[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data)]
+    .argmax()
+    .data
+)
+xpp = x_pos_sel[[int(i) for i in np.round(np.linspace(0, x_pos_sel.size - 1, 4))]]
 xpp = np.insert(xpp, 0, x_pos_max)
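Note: the block above keeps only stancils whose beam- and wavenumber-mean power is finite, picks four of them evenly spaced, and prepends the most energetic one. One way to express the same intent on a plain 1-D array, for reference (toy input, hypothetical variable names; this version maps the argmax back to the original index):

    import numpy as np

    mean_psd = np.array([np.nan, 0.2, 0.5, np.nan, 0.1, 0.9, 0.3])  # per-stancil mean power
    valid = np.flatnonzero(~np.isnan(mean_psd))
    picks = valid[np.round(np.linspace(0, valid.size - 1, 4)).astype(int)]
    picks = np.insert(picks, 0, valid[mean_psd[valid].argmax()])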
 for i in xpp:
+    F = M.figure_axis_xy(6, 8, container=True, view_scale=0.8)

-    #i = xpp[0]
-    F = M.figure_axis_xy(6, 8, container =True, view_scale= 0.8)
-
-    plt.suptitle('gFT Model and Spectrograms | x='+str(Gk.x[i].data)+' \n' + track_name, y = 0.95)
-    gs = GridSpec(5,6, wspace=0.2, hspace=0.7)#figure=fig,
+    plt.suptitle(
+        "gFT Model and Spectrograms | x=" + str(Gk.x[i].data) + " \n" + track_name,
+        y=0.95,
+    )
+    gs = GridSpec(5, 6, wspace=0.2, hspace=0.7)

     ax0 = F.fig.add_subplot(gs[0:2, :])

-    col_d = color_schemes.__dict__['rels']
+    col_d = color_schemes.__dict__["rels"]

     neven = True
     offs = 0
     for k in all_beams:
-
-        Gx_1 = Gx.isel(x= i).sel(beam = k)
-        Gk_1 = Gk.isel(x= i).sel(beam = k)
-
-        plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 )
-        ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3
-        #add_info(Gx_1, Gk_1 , ylims )
+        Gx_1 = Gx.isel(x=i).sel(beam=k)
+        Gk_1 = Gk.isel(x=i).sel(beam=k)
+
+        plot_model_eta(
+            Gx_1,
+            ax0,
+            offset=offs,
+            linestyle="-",
+            color=col_d[k],
+            linewidth=0.4,
+            alpha=1,
+            zorder=12,
+        )
+        ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3

-        # oringial data
-        eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11)
+        # original data
+        eta_1 = plot_data_eta(
+            Gx_1, offset=offs, linestyle="-", c="k", linewidth=1, alpha=0.5, zorder=11
+        )

         # reconstruct in gaps
-        FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k )
+        FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k)
         _ = FT.get_H()
-        FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ])
-        plt.plot(Gx_1.eta, FT.model()+offs ,'-', c='orange', linewidth=0.3, alpha=1,zorder= 2)
+        FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff])
+        plt.plot(
+            Gx_1.eta,
+            FT.model() + offs,
+            "-",
+            c="orange",
+            linewidth=0.3,
+            alpha=1,
+            zorder=2,
+        )

         if neven:
             neven = False
-            offs += .3
+            offs += 0.3
         else:
             neven = True
-            offs +=0.6
-
+            offs += 0.6

-    dx = eta_1.diff('eta').mean().data
+    dx = eta_1.diff("eta").mean().data

-    eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11)
+    eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11)

     ax0.set_xticks(eta_ticks)
-    ax0.set_xticklabels(eta_ticks/1e3)
-    plt.xlim( eta_1[0].data - 40 * dx, eta_1[-1].data+ 40 * dx )
-    plt.title('Model reconst.', loc ='left')
-
-
-    plt.ylabel('relative slope (m/m)')
-    plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km')
+    ax0.set_xticklabels(eta_ticks / 1e3)
+    plt.xlim(eta_1[0].data - 40 * dx, eta_1[-1].data + 40 * dx)
+    plt.title("Model reconst.", loc="left")
+    plt.ylabel("relative slope (m/m)")
+    plt.xlabel(
+        "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km"
+    )

     # spectra
     # define threshold
     k_thresh = 0.085

     ax1_list = list()
-    dd_max=list()
-    for pos, kgroup, lflag in zip([ gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], [['gt1l', 'gt1r'], ['gt2l', 'gt2r'], ['gt3l', 'gt3r']], [True, False, False] ):
-
+    dd_max = list()
+    for pos, kgroup, lflag in zip(
+        [gs[2, 0:2], gs[2, 2:4], gs[2, 4:]],
+        [["gt1l", "gt1r"], ["gt2l", "gt2r"], ["gt3l", "gt3r"]],
+        [True, False, False],
+    ):
         ax11 = F.fig.add_subplot(pos)
         ax11.tick_params(labelleft=lflag)
         ax1_list.append(ax11)

         for k in kgroup:
-
-            Gx_1 = Gx.isel(x= i).sel(beam = k)
-            Gk_1 = Gk.isel(x= i).sel(beam = k)
-
-            klim= Gk_1.k[0], Gk_1.k[-1]
+            Gx_1 = Gx.isel(x=i).sel(beam=k)
+            Gk_1 = Gk.isel(x=i).sel(beam=k)
+
+            klim = Gk_1.k[0], Gk_1.k[-1]

-            if 'l' in k:
-                dd = Gk_1.gFT_PSD_data#.rolling(k=10, min_periods= 1, center=True).mean()
-                plt.plot(Gk_1.k, dd, color='gray', linewidth=.5 ,alpha= 0.5 )
+            if "l" in k:
+                dd = Gk_1.gFT_PSD_data
+                plt.plot(Gk_1.k, dd, color="gray", linewidth=0.5, alpha=0.5)

-            dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods= 1, center=True).mean()
-            plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=.8 )
+            dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods=1, center=True).mean()
+            plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=0.8)
             dd_max.append(np.nanmax(dd.data))
             plt.xlim(klim)

         if lflag:
-            plt.ylabel('$(m/m)^2/k$')
-            plt.title('Energy Spectra', loc ='left')
-
-        plt.xlabel('wavenumber k (2$\pi$ m$^{-1}$)')
-
-        #plt.ylim(dd.min(), max(dd_max) * 1.1)
-
-        ax11.axvline(k_thresh, linewidth=1, color='gray', alpha=1)
-        ax11.axvspan(k_thresh , klim[-1], color='gray', alpha=0.5, zorder=12)
+            plt.ylabel("$(m/m)^2/k$")
+            plt.title("Energy Spectra", loc="left")
+
+        plt.xlabel("wavenumber k (2$\pi$ m$^{-1}$)")
+
+        ax11.axvline(k_thresh, linewidth=1, color="gray", alpha=1)
+        ax11.axvspan(k_thresh, klim[-1], color="gray", alpha=0.5, zorder=12)

     if ~np.isnan(np.nanmax(dd_max)):
         for ax in ax1_list:
@@ -517,48 +524,58 @@ def plot_model_eta(D, ax, offset = 0, **kargs ):
     neven = True
     offs = 0
     for k in all_beams:
-
-        Gx_1 = Gx.isel(x= i).sel(beam = k)
-        Gk_1 = Gk.isel(x= i).sel(beam = k)
-
-        #plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 )
-        ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3
-        #add_info(Gx_1, Gk_1 , ylims )
+        Gx_1 = Gx.isel(x=i).sel(beam=k)
+        Gk_1 = Gk.isel(x=i).sel(beam=k)
+
+        ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3

-        # oringial data
-        eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1.5, alpha =0.5, zorder=11)
+        # original data
+        eta_1 = plot_data_eta(
+            Gx_1, offset=offs, linestyle="-", c="k", linewidth=1.5, alpha=0.5, zorder=11
+        )

         # reconstruct in gaps
-        FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k )
+        FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k)
         _ = FT.get_H()
-        FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ])
+        FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff])

-        p_hat_k = np.concatenate([ Gk_1.k, Gk_1.k ])
+        p_hat_k = np.concatenate([Gk_1.k, Gk_1.k])  # wavenumber of each cos/sin coefficient
         k_mask = p_hat_k < k_thresh
         FT.p_hat[~k_mask] = 0

-        plt.plot(Gx_1.eta, FT.model()+offs ,'-', c=col_d[k], linewidth=0.8, alpha=1,zorder= 12)
+        plt.plot(
+            Gx_1.eta,
+            FT.model() + offs,
+            "-",
+            c=col_d[k],
+            linewidth=0.8,
+            alpha=1,
+            zorder=12,
+        )

         if neven:
             neven = False
-            offs += .3
+            offs += 0.3
         else:
             neven = True
-            offs +=0.6
+            offs += 0.6

-    dx = eta_1.diff('eta').mean().data
+    dx = eta_1.diff("eta").mean().data

-    eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11)
+    eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11)

     ax0.set_xticks(eta_ticks)
-    ax0.set_xticklabels(eta_ticks/1e3)
-    plt.xlim( eta_1[1000].data - 40 * dx, eta_1[-1000].data+ 40 * dx )
-    plt.title('Low-Wavenumber Model reconst.', loc ='left')
-
-    plt.ylabel('relative slope (m/m)')
-    plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km')
+    ax0.set_xticklabels(eta_ticks / 1e3)
+    plt.xlim(eta_1[1000].data - 40 * dx, eta_1[-1000].data + 40 * dx)
+    plt.title("Low-Wavenumber Model reconst.", loc="left")
+    plt.ylabel("relative slope (m/m)")
+    plt.xlabel(
+        "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km"
+    )

-    F.save_pup(path=plot_path+'B03_spectra/', name = 'B03_freq_reconst_x'+str(i))
+    F.save_pup(path=plot_path + "B03_spectra/", name="B03_freq_reconst_x" + str(i))

-MT.json_save('B03_success', plot_path, {'time':'time.asctime( time.localtime(time.time()) )'})
+MT.json_save(
+    "B03_success", plot_path, {"time": time.asctime(time.localtime(time.time()))}
+)
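Note: the low-wavenumber reconstruction above zeroes every gFT coefficient at or above k_thresh before evaluating the model. A pure-numpy stand-in that makes the linear algebra explicit, assuming, as a sketch, that the model matrix stacks cosine and sine columns (this mirrors the coefficient ordering used above, but is not the `gFT.generalized_Fourier` API):

    import numpy as np

    def lowpass_reconstruction(eta, k, cos_coeff, sin_coeff, k_thresh=0.085):
        # zero coefficients at and above the cutoff, then evaluate y = H @ p_hat
        p_hat = np.concatenate([cos_coeff, sin_coeff]).astype(float)
        p_hat[np.concatenate([k, k]) >= k_thresh] = 0.0
        H = np.hstack([np.cos(np.outer(eta, k)), np.sin(np.outer(eta, k))])
        return H @ p_hat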