From b57b8861f7addde4b1710b7232be1f05df4c0c61 Mon Sep 17 00:00:00 2001 From: Camilo Diaz Date: Mon, 11 Dec 2023 07:40:50 -0500 Subject: [PATCH 01/12] adding B03_plot_spectra_ov.py to the new packagin structure --- .../analysis_db/B03_plot_spectra_ov.py | 564 ++++++++++++++++++ src/icesat2_tracks/config/IceSAT2_startup.py | 2 +- 2 files changed, 565 insertions(+), 1 deletion(-) create mode 100644 src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py diff --git a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py new file mode 100644 index 00000000..6d6f5754 --- /dev/null +++ b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py @@ -0,0 +1,564 @@ +# %% +import os, sys +#execfile(os.environ['PYTHONSTARTUP']) + +""" +This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. +This is python 3 +""" + +from icesat2_tracks.config.IceSAT2_startup import ( + mconfig, + xr, + color_schemes, + font_for_pres, + plt, + np, + font_for_print +) + +#%matplotlib inline + +import icesat2_tracks.ICEsat2_SI_tools.convert_GPS_time as cGPS +import h5py +import icesat2_tracks.ICEsat2_SI_tools.io as io +import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec + +import time +import imp +import copy +import icesat2_tracks.ICEsat2_SI_tools.spicke_remover +import datetime +from matplotlib.gridspec import GridSpec +import icesat2_tracks.ICEsat2_SI_tools.generalized_FT as gFT +from scipy.ndimage.measurements import label +import icesat2_tracks.local_modules.m_tools_ph3 as MT +from icesat2_tracks.local_modules import m_general_ph3 as M + +#import s3fs +# %% +track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment +#track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False +#track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False +#track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False +#track_name, batch_key, test_flag = '20190208152826_06440210_004_01', 'SH_batch01', False +#track_name, batch_key, test_flag = '20190213133330_07190212_004_01', 'SH_batch02', False +#track_name, batch_key, test_flag = '20190207002436_06190212_004_01', 'SH_batch02', False +#track_name, batch_key, test_flag = '20190206022433_06050212_004_01', 'SH_batch02', False + +#track_name, batch_key, test_flag = 'SH_20190101_00570212', 'SH_batch04', True +#track_name, batch_key, test_flag = 'SH_20190219_08070210', 'SH_batchminimal', True + + + +#track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False +#print(track_name, batch_key, test_flag) +hemis, batch = batch_key.split('_') + +load_path = mconfig['paths']['work'] +batch_key+'/B02_spectra/' +load_file = load_path + 'B02_' + track_name #+ '.nc' +plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/' +MT.mkdirs_r(plot_path) + +Gk = xr.open_dataset(load_file+'_gFT_k.nc') +Gx = xr.open_dataset(load_file+'_gFT_x.nc') + +Gfft = xr.open_dataset(load_file+'_FFT.nc') +# print(Gk) +# print(Gx) +time.sleep(2) + + +# %% +# for ibeam in Gk.beam: +# print(Gk.sel(beam=ibeam).gFT_PSD_data.data) + +# %% +all_beams = mconfig['beams']['all_beams'] +high_beams = mconfig['beams']['high_beams'] +low_beams = mconfig['beams']['low_beams'] +#Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data +#Gd = io.load_pandas_table_dict(track_name 
+ '_B01_binned' , load_path) # +color_schemes.colormaps2(21) + +# %% check paths (again) + +col_dict= color_schemes.rels +F = M.figure_axis_xy(9, 3, view_scale =0.5) + +plt.subplot(1,3, 1) +plt.title(track_name , loc ='left') +for k in all_beams: + I = Gk.sel(beam=k) + I2 = Gx.sel(beam=k) + plt.plot(I['lon'], I['lat'], '.', c= col_dict[k], markersize = 0.7, linewidth =0.3) + plt.plot(I2['lon'], I2['lat'], '|', c= col_dict[k], markersize = 0.7 ) + + +plt.xlabel('lon') +plt.ylabel('lat') + +plt.subplot(1,3, 2) + +xscale= 1e3 +for k in all_beams: + I = Gk.sel(beam=k) + plt.plot( I['x_coord']/xscale , I['y_coord']/xscale, '.' , c= col_dict[k] , linewidth = 0.8, markersize = 0.8 ) + # I2 = G_gFT[k] + # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) + +plt.xlabel('x_coord (km)') +plt.ylabel('y_coord (km)') + +plt.subplot(1,3, 3) + +xscale= 1e3 +for k in all_beams: + I = Gk.sel(beam=k) + plt.plot( I['x_coord']/xscale , (I['y_coord']-I['y_coord'][0]), '.' , c= col_dict[k], linewidth = 0.8, markersize = 0.8) + # I2 = G_gFT[k] + # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) + +plt.xlabel('x_coord (km)') +plt.ylabel('y_coord deviation (m)') + + +F.save_light(path=plot_path, name = 'B03_specs_coord_check') + + +# %% +def dict_weighted_mean(Gdict, weight_key): + """ + returns the weighted meean of a dict of xarray, data_arrays + weight_key must be in the xr.DataArrays + """ + #Gdict = G_rar_fft + #weight_key='N_per_stancil' + + akey = list( Gdict.keys() )[0] + GSUM = Gdict[akey].copy() + GSUM.data = np.zeros(GSUM.shape) + N_per_stancil = GSUM.N_per_stancil * 0 + N_photons = np.zeros(GSUM.N_per_stancil.size) + + counter= 0 + for k,I in Gdict.items(): + #print(k) + I =I.squeeze() + print(len(I.x) ) + if len(I.x) !=0: + GSUM += I.where( ~np.isnan(I), 0) * I[weight_key] #.sel(x=GSUM.x) + N_per_stancil += I[weight_key] + if 'N_photons' in GSUM.coords: + N_photons += I['N_photons'] + counter+=1 + + GSUM = GSUM / N_per_stancil + + if 'N_photons' in GSUM.coords: + GSUM.coords['N_photons'] = (('x', 'beam'), np.expand_dims(N_photons, 1) ) + + GSUM['beam'] = ['weighted_mean'] + GSUM.name='power_spec' + + return GSUM + + +G_gFT_wmean = (Gk['gFT_PSD_data'].where( ~np.isnan(Gk['gFT_PSD_data']), 0) * Gk['N_per_stancil']).sum('beam')/ Gk['N_per_stancil'].sum('beam') +G_gFT_wmean['N_per_stancil'] = Gk['N_per_stancil'].sum('beam') + +G_fft_wmean = (Gfft.where( ~np.isnan(Gfft), 0) * Gfft['N_per_stancil']).sum('beam')/ Gfft['N_per_stancil'].sum('beam') +G_fft_wmean['N_per_stancil'] = Gfft['N_per_stancil'].sum('beam') + + +# %% plot +def plot_wavenumber_spectrogram(ax, Gi, clev, title= None, plot_photon_density=True ): + + if Gi.k[0] ==0: + Gi= Gi.sel(k=Gi.k[1:]) + x_lambda= 2 * np.pi/Gi.k + plt.pcolormesh(Gi.x/1e3, x_lambda , Gi, cmap=plt.cm.ocean_r , vmin = clev[0], vmax = clev[-1]) + + ax.set_yscale('log') + # plt.colorbar(orientation='vertical', pad=0.06, label='Spectral Power (m^2/m)') + + if plot_photon_density: + + plt.plot(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10 , c='black', linewidth= 0.8, label='NAN-density' ) + plt.fill_between(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10, 0, color='gray', alpha = 0.3) + ax.axhline(30, color='black', linewidth=0.3) + + #plt.xlabel('Distance from the Ice Edge (km)') + plt.ylim(x_lambda[-1], x_lambda[0]) + plt.title(title, loc='left') + +#Gplot = G.rolling(x=5, min_periods= 1, center=True).mean() +#Gmean = 
G_gFT_wmean.rolling(x=2, min_periods= 1, center=True).mean() +Gmean = G_gFT_wmean.rolling(k=5, center=True).mean() +#Gmean = Gmean.where(~np.isnan(Gmean), 0) +try: + k_max_range = Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1.25 +except: + k_max_range = Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1.25 + + +# %% +font_for_print() +F = M.figure_axis_xy(6.5, 5.6, container= True, view_scale =1) +Lmeters = Gk.L.data[0] + +plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98) +gs = GridSpec(3,3, wspace=0.2, hspace=.5)#figure=fig, +#clev=np.arange(0, 6, 0.1)*3 + +#%matplotlib inline + +# define mean first for colorbar +Gplot = G_gFT_wmean.squeeze().rolling(k=10, min_periods= 1, center=True).median().rolling(x=3, min_periods= 1, center=True).median() +dd = 10 * np.log10(Gplot) +dd= dd.where(~np.isinf(dd), np.nan ) +clev_log = M.clevels( [dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31)* 1 + +#clev = M.clevels( [Gmean.quantile(0.6).data * 1e4, Gmean.quantile(0.99).data * 1e4], 31)/ 1e4 + +xlims= Gmean.x[0]/1e3, Gmean.x[-1]/1e3 + +k =high_beams[0] +for pos, k, pflag in zip([gs[0, 0],gs[0, 1],gs[0, 2] ], high_beams, [True, False, False] ): + ax0 = F.fig.add_subplot(pos) + Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean() + #Gplot.mean('x').plot() + dd2 = 10 * np.log10(Gplot) + dd2= dd2.where(~np.isinf(dd2), np.nan ) + plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k + ' unsmoothed', plot_photon_density=True ) + plt.xlim(xlims) + # + if pflag: + plt.ylabel('Wave length\n(meters)') + plt.legend() + +for pos, k, pflag in zip([gs[1, 0],gs[1, 1],gs[1, 2] ], low_beams, [True, False, False] ): + ax0 = F.fig.add_subplot(pos) + Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean() + #Gplot.mean('x').plot() + dd2 = 10 * np.log10(Gplot) + dd2= dd2.where(~np.isinf(dd2), np.nan ) + plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k+ ' unsmoothed', plot_photon_density=True ) + plt.xlim(xlims) + # + if pflag: + plt.ylabel('Wave length\n(meters)') + plt.legend() + +ax0 = F.fig.add_subplot(gs[2, 0]) + +plot_wavenumber_spectrogram(ax0, dd, clev_log , title ='smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$', plot_photon_density= True) +plt.xlim(xlims) + +# plt.plot(Gplot.x/1e3, 10* nan_list +20 , c='black', label='NAN-density' ) +# ax0.axhline(30, color='black', linewidth=0.5) + +ax0.axhline(2* np.pi/k_max_range[0], color='red', linestyle= '--', linewidth= 0.5) +ax0.axhline(2* np.pi/k_max_range[1], color='red', linestyle= '-', linewidth= 0.5) +ax0.axhline(2* np.pi/k_max_range[2], color='red', linestyle= '--', linewidth= 0.5) + +if pflag: + plt.ylabel('Wave length\n(meters)') + plt.legend() + +pos = gs[2, 1] +ax0 = F.fig.add_subplot(pos) +plt.title('Photons density ($m^{-1}$)', loc='left') + +for k in all_beams: + I = Gk.sel(beam = k)['gFT_PSD_data'] + plt.plot(Gplot.x/1e3, I.N_photons/I.L.data, label=k, linewidth=0.8) +plt.plot(Gplot.x/1e3, G_gFT_wmean.N_per_stancil/3/I.L.data , c='black', label='ave Photons' , linewidth=0.8) +plt.xlim(xlims) +plt.xlabel('Distance from the Ice Edge (km)') + +pos = gs[2, 2] + +ax0 = F.fig.add_subplot(pos) 
+ax0.set_yscale('log') + +plt.title('Peak Spectal Power', loc='left') + +x0 = Gk.x[0].data +for k in all_beams: + I = Gk.sel(beam = k)['gFT_PSD_data'] + plt.scatter(I.x.data/1e3, I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k').data , s=0.5, marker='.', color='red', alpha= 0.3) + + I= Gfft.sel(beam = k)#.to_array() + #I= I[:,I.N_per_stancil >= I.N_per_stancil.max().data*0.9] + plt.scatter( (x0 +I.x.data)/1e3, I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , s=0.5, marker='.', c='blue', alpha= 0.3) + + +Gplot= G_fft_wmean.squeeze() +Gplot = Gplot.power_spec[:,Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data*0.9] +plt.plot( (x0 + Gplot.x)/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.', markersize=1.5 , c='blue', label= 'FFT') + +Gplot= G_gFT_wmean.squeeze() +plt.plot( Gplot.x/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.' , markersize=1.5, c='red', label= 'gFT') + +plt.ylabel('1e-3 $(m)^2~m$') +plt.legend() +#plt.ylim(Gplot.min()*1.4, Gplot.max()*1.4 ) +#plt.xlim(xlims) + +F.save_light(path=plot_path, name = 'B03_specs_L'+str(Lmeters)) + +# %% +Gk.sel(beam = k).gFT_PSD_data.plot() + +# %% define simple routines +def plot_model_eta(D, ax, offset = 0, xscale= 1e3 , **kargs ): + eta = D.eta + D.x + y_data = D.y_model+offset + plt.plot(eta/xscale,y_data , **kargs) + + ax.axvline(eta[0].data/xscale , linewidth=2, color=kargs['color'], alpha=0.5) + ax.axvline(eta[-1].data/xscale, linewidth=2, color=kargs['color'], alpha=0.5) + +def add_info(D, Dk, ylims): + eta = D.eta + D.x + N_per_stancil, ksize = Dk.N_per_stancil.data , Dk.k.size + plt.text(eta[0].data, ylims[-1], ' N='+numtostr(N_per_stancil) + ' N/2M= '+ fltostr(N_per_stancil/2/ksize, 1) ) + +def plot_data_eta(D, offset = 0,xscale= 1e3 , **kargs ): + eta_1 = D.eta + D.x + y_data = D.y_model +offset + plt.plot(eta_1/xscale,y_data , **kargs) + return eta_1 + + +# %% phase examples +### overlapping views +#for i in np.arange(0,29,2): +# i = 4 +# c1= 'blue' +# c2= 'red' +# +# Gx_1 = Gx.isel(x= i).sel(beam = k) +# Gx_2 = Gx.isel(x= i+1).sel(beam = k) +# +# Gk_1 = Gk.isel(x= i).sel(beam = k) +# Gk_2 = Gk.isel(x= i+1).sel(beam = k) +# +# fltostr = MT.float_to_str +# numtostr = MT.num_to_str +# +# #if k%2 ==0: +# font_for_print() +# F = M.figure_axis_xy(9, 5, container =True, view_scale= 0.8) +# +# plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98) +# gs = GridSpec(3,4, wspace=0.2, hspace=.5)#figure=fig, +# +# ax0 = F.fig.add_subplot(gs[0, :]) +# +# +# +# plot_model_eta(Gx_1, ax0, linestyle='-', color=c1, linewidth=0.4, alpha=1, zorder=12 ) +# plot_model_eta(Gx_2, ax0, linestyle='-', color=c2, linewidth=0.4, alpha=1, zorder=12 ) +# +# ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 +# +# add_info(Gx_1, Gk_1 , ylims ) +# add_info(Gx_2, Gk_1 , ylims ) +# +# # oringial data +# +# eta_1= plot_data_eta(Gx_1 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) +# eta_2= plot_data_eta(Gx_2 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) +# +# dx = eta_1.diff('eta').mean() +# plt.xlim(eta_1[0].data - 40 * dx, eta_2[-1].data + 40 * dx ) +# plt.ylim(ylims[0], ylims[-1]) +# + +# %% Single views + +def plot_data_eta(D, offset = 0 , **kargs ): + eta_1 = D.eta# + D.x + y_data = D.y_model +offset + plt.plot(eta_1,y_data , **kargs) + return eta_1 + +def plot_model_eta(D, ax, offset = 0, **kargs ): + eta = D.eta #+ D.x + y_data = D.y_model+offset + plt.plot(eta ,y_data , **kargs) + + 
ax.axvline(eta[0].data, linewidth=0.1, color=kargs['color'], alpha=0.5) + ax.axvline(eta[-1].data, linewidth=0.1, color=kargs['color'], alpha=0.5) + +if ('y_data' in Gx.sel(beam = 'gt3r').keys()): + print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) ) +else: + print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) ) + MT.json_save('B03_fail', plot_path, {'reason':'no y_data'}) + print('failed, exit') + exit() + + +# %% +fltostr = MT.float_to_str +numtostr = MT.num_to_str + +font_for_print() + + +#for i in x_pos_sel[::2]: +#i =x_pos_sel[20] +MT.mkdirs_r(plot_path+'B03_spectra/') + +x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data.data)] +x_pos_max = Gk.mean('beam').mean('k').gFT_PSD_data[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data)].argmax().data +xpp = x_pos_sel[ [int(i) for i in np.round(np.linspace(0, x_pos_sel.size-1, 4))]] +xpp = np.insert(xpp, 0, x_pos_max) + +for i in xpp: + + #i = xpp[0] + F = M.figure_axis_xy(6, 8, container =True, view_scale= 0.8) + + plt.suptitle('gFT Model and Spectrograms | x='+str(Gk.x[i].data)+' \n' + track_name, y = 0.95) + gs = GridSpec(5,6, wspace=0.2, hspace=0.7)#figure=fig, + + ax0 = F.fig.add_subplot(gs[0:2, :]) + col_d = color_schemes.__dict__['rels'] + + neven = True + offs = 0 + for k in all_beams: + + Gx_1 = Gx.isel(x= i).sel(beam = k) + Gk_1 = Gk.isel(x= i).sel(beam = k) + + plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 ) + ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 + #add_info(Gx_1, Gk_1 , ylims ) + + # oringial data + eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) + + # reconstruct in gaps + FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k ) + _ = FT.get_H() + FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ]) + plt.plot(Gx_1.eta, FT.model()+offs ,'-', c='orange', linewidth=0.3, alpha=1,zorder= 2) + + if neven: + neven = False + offs += .3 + else: + neven = True + offs +=0.6 + + + dx = eta_1.diff('eta').mean().data + + eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) + + ax0.set_xticks(eta_ticks) + ax0.set_xticklabels(eta_ticks/1e3) + plt.xlim( eta_1[0].data - 40 * dx, eta_1[-1].data+ 40 * dx ) + plt.title('Model reconst.', loc ='left') + + + plt.ylabel('relative slope (m/m)') + plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km') + + + # spectra + # define threshold + k_thresh = 0.085 + ax1_list = list() + dd_max=list() + for pos, kgroup, lflag in zip([ gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], [['gt1l', 'gt1r'], ['gt2l', 'gt2r'], ['gt3l', 'gt3r']], [True, False, False] ): + + ax11 = F.fig.add_subplot(pos) + ax11.tick_params(labelleft=lflag) + ax1_list.append(ax11) + for k in kgroup: + + Gx_1 = Gx.isel(x= i).sel(beam = k) + Gk_1 = Gk.isel(x= i).sel(beam = k) + + klim= Gk_1.k[0], Gk_1.k[-1] + + if 'l' in k: + dd = Gk_1.gFT_PSD_data#.rolling(k=10, min_periods= 1, center=True).mean() + plt.plot(Gk_1.k, dd, color='gray', linewidth=.5 ,alpha= 0.5 ) + + dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods= 1, center=True).mean() + plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=.8 ) + dd_max.append(np.nanmax(dd.data)) + plt.xlim(klim) + + if lflag: + plt.ylabel('$(m/m)^2/k$') + plt.title('Energy Spectra', loc ='left') + + plt.xlabel('wavenumber k (2$\pi$ m$^{-1}$)') + + #plt.ylim(dd.min(), max(dd_max) * 1.1) + + ax11.axvline(k_thresh, linewidth=1, color='gray', alpha=1) + ax11.axvspan(k_thresh , klim[-1], 
color='gray', alpha=0.5, zorder=12) + + if ~np.isnan(np.nanmax(dd_max)): + for ax in ax1_list: + ax.set_ylim(0, np.nanmax(dd_max) * 1.1) + + ax0 = F.fig.add_subplot(gs[-2:, :]) + + neven = True + offs = 0 + for k in all_beams: + + Gx_1 = Gx.isel(x= i).sel(beam = k) + Gk_1 = Gk.isel(x= i).sel(beam = k) + + #plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 ) + ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 + #add_info(Gx_1, Gk_1 , ylims ) + + # oringial data + eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1.5, alpha =0.5, zorder=11) + + # reconstruct in gaps + FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k ) + _ = FT.get_H() + FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ]) + + p_hat_k = np.concatenate([ Gk_1.k, Gk_1.k ]) + k_mask = p_hat_k < k_thresh + FT.p_hat[~k_mask] = 0 + + plt.plot(Gx_1.eta, FT.model()+offs ,'-', c=col_d[k], linewidth=0.8, alpha=1,zorder= 12) + + if neven: + neven = False + offs += .3 + else: + neven = True + offs +=0.6 + + dx = eta_1.diff('eta').mean().data + + eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) + + ax0.set_xticks(eta_ticks) + ax0.set_xticklabels(eta_ticks/1e3) + plt.xlim( eta_1[1000].data - 40 * dx, eta_1[-1000].data+ 40 * dx ) + plt.title('Low-Wavenumber Model reconst.', loc ='left') + + + plt.ylabel('relative slope (m/m)') + plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km') + + F.save_pup(path=plot_path+'B03_spectra/', name = 'B03_freq_reconst_x'+str(i)) + +MT.json_save('B03_success', plot_path, {'time':'time.asctime( time.localtime(time.time()) )'}) diff --git a/src/icesat2_tracks/config/IceSAT2_startup.py b/src/icesat2_tracks/config/IceSAT2_startup.py index 30ffaffb..df477bae 100644 --- a/src/icesat2_tracks/config/IceSAT2_startup.py +++ b/src/icesat2_tracks/config/IceSAT2_startup.py @@ -12,7 +12,7 @@ import matplotlib #matplotlib.use('Agg') import matplotlib.pyplot as plt -from matplotlib.gridspec import GridSpec + import matplotlib.colors as colors import pandas as pd from icesat2_tracks.local_modules import m_colormanager_ph3 as M_color From 57d9a22b946072d261dd8bfa62ab59cf438ba2d2 Mon Sep 17 00:00:00 2001 From: Camilo Diaz Date: Tue, 26 Dec 2023 20:15:21 -0500 Subject: [PATCH 02/12] cleaning and formatting files --- .../ICEsat2_SI_tools/generalized_FT.py | 962 ++++++++++-------- .../analysis_db/B03_plot_spectra_ov.py | 695 +++++++------ 2 files changed, 897 insertions(+), 760 deletions(-) diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py index 1a2d2fcd..a8579e80 100644 --- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py +++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py @@ -1,43 +1,46 @@ - import numpy as np import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec import icesat2_tracks.ICEsat2_SI_tools.lanczos as lanczos -def rebin(data, dk, return_edges =False): + +def rebin(data, dk, return_edges=False): """ rebin data to a new k-grid with dk """ - k_low_limits =data.k[::10] - Gmean = data.groupby_bins('k' , k_low_limits).mean() - k_low = (k_low_limits + k_low_limits.diff('k')[0]/2).data - Gmean['k_bins'] = k_low[0:-1] - Gmean = Gmean.rename({'k_bins': 'k'}) + k_low_limits = data.k[::10] + Gmean = data.groupby_bins("k", k_low_limits).mean() + k_low = (k_low_limits + k_low_limits.diff("k")[0] / 2).data + Gmean["k_bins"] = k_low[0:-1] + Gmean = Gmean.rename({"k_bins": "k"}) 
if return_edges: return Gmean, k_low_limits else: return Gmean + # define weight function def smooth_data_to_weight(dd, m=150): - """ returns a weight function from smooth data dd is the data m is the number of points to smooth over """ - dd_fake = np.ones( 4*m + dd.size)*dd.max()*0.01 - dd_fake[2*m:-2*m]=dd + dd_fake = np.ones(4 * m + dd.size) * dd.max() * 0.01 + dd_fake[2 * m : -2 * m] = dd weight = lanczos.lanczos_filter_1d_wrapping(np.arange(dd_fake.size), dd_fake, m) - #weight= M.runningmean_wrap_around(dd_fake, m=m) - weight=weight[2*m:-2*m] - weight=weight/weight.max() + # weight= M.runningmean_wrap_around(dd_fake, m=m) + weight = weight[2 * m : -2 * m] + weight = weight / weight.max() return weight -def get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=False, method = 'gaussian' ): + +def get_weights_from_data( + x, y, dx, stancil, k, max_nfev, plot_flag=False, method="gaussian" +): """ x,y, x postions and y data, on any (regular) postion, has gaps dx dx @@ -47,13 +50,12 @@ def get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=False, metho returns: peak-normalized weights in the size of k """ - #make y gridded - x_pos = (np.round( (x - stancil[0])/ dx -1 , 0) ).astype('int') - x_model = np.arange(stancil[0], stancil[-1], dx) - y_gridded = np.copy(x_model) * 0 + # make y gridded + x_pos = (np.round((x - stancil[0]) / dx - 1, 0)).astype("int") + x_model = np.arange(stancil[0], stancil[-1], dx) + y_gridded = np.copy(x_model) * 0 y_gridded[x_pos] = y - #nan_mask =np.isnan(y_gridded) - + # nan_mask =np.isnan(y_gridded) # def gaus(x, x_0, amp, sigma_g ): # return amp* np.exp(-0.5 * ( (x-x_0)/sigma_g)**2) @@ -61,103 +63,117 @@ def get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=False, metho # weight = weight *10+ weight.max()* 0.005 # add pemnalty floor # take FFT to get peaj parameters - k_fft = np.fft.rfftfreq(x_model.size, d=dx) * 2* np.pi - f_weight= np.sqrt(9.81 * k_fft) / (2 *np.pi) - data_weight = spec.Z_to_power(np.fft.rfft(y_gridded), np.diff(f_weight).mean(), x_pos.size) + k_fft = np.fft.rfftfreq(x_model.size, d=dx) * 2 * np.pi + f_weight = np.sqrt(9.81 * k_fft) / (2 * np.pi) + data_weight = spec.Z_to_power( + np.fft.rfft(y_gridded), np.diff(f_weight).mean(), x_pos.size + ) - Spec_fft = get_prior_spec(f_weight, data_weight ) + Spec_fft = get_prior_spec(f_weight, data_weight) - pars = Spec_fft.set_parameters(flim= np.sqrt(9.81 * k[-1] ) /2/np.pi) - k_max = (pars['f_max'].value *2 *np.pi)**2/ 9.81 - #print('k_max ', k_max) + pars = Spec_fft.set_parameters(flim=np.sqrt(9.81 * k[-1]) / 2 / np.pi) + k_max = (pars["f_max"].value * 2 * np.pi) ** 2 / 9.81 + # print('k_max ', k_max) - - if method is 'gaussian': + if method == "gaussian": # simple gaussian weight - def gaus(x, x_0, amp, sigma_g ): - return amp* np.exp(-0.5 * ( (x-x_0)/sigma_g)**2) + def gaus(x, x_0, amp, sigma_g): + return amp * np.exp(-0.5 * ((x - x_0) / sigma_g) ** 2) - weight = gaus(k, k_max, 1 , 0.02)**(1/2) - #weight = weight *1+ weight.max()* 0.1 # add pemnalty floor + weight = gaus(k, k_max, 1, 0.02) ** (1 / 2) + # weight = weight *1+ weight.max()* 0.1 # add pemnalty floor params = None - elif method is 'parametric': - + elif method == "parametric": # JONSWAP weight - f= np.sqrt(9.81 * k) / (2 *np.pi) - #weight = weight + weight.max()* 0.1 # add pemnalty floor + f = np.sqrt(9.81 * k) / (2 * np.pi) + # weight = weight + weight.max()* 0.1 # add pemnalty floor # optimzes paramteric function to data - #Spec_fft.data = Spec_fft.runningmean(Spec_fft.data , 10, tailcopy=True) - 
#Spec_fft.data[np.isnan(Spec_fft.data)] = 0 + # Spec_fft.data = Spec_fft.runningmean(Spec_fft.data , 10, tailcopy=True) + # Spec_fft.data[np.isnan(Spec_fft.data)] = 0 - weight = Spec_fft.create_weight(freq = f, plot_flag= False, max_nfev=max_nfev) + weight = Spec_fft.create_weight(freq=f, plot_flag=False, max_nfev=max_nfev) if plot_flag: Spec_fft.fitter.params.pretty_print() params = Spec_fft.fitter.params - #weight = weight+ weight.max()* 0.05 # add pemnalty floor + # weight = weight+ weight.max()* 0.05 # add pemnalty floor else: raise ValueError(" 'method' must be either 'gaussian' or 'parametric' ") - if plot_flag: import matplotlib.pyplot as plt - #plt.plot(k_fft[1:], Spec_fft.model_func(Spec_fft.freq, pars), 'b--' ) - plt.plot(k_fft[1:], Spec_fft.data, c='gray',label='FFT for Prior', linewidth = 0.5) - plt.plot(k, weight, zorder=12, c='black' , label = 'Fitted model to FFT', linewidth = 0.5) - plt.xlim(k[0],k[-1] ) - #plt.show() + # plt.plot(k_fft[1:], Spec_fft.model_func(Spec_fft.freq, pars), 'b--' ) + plt.plot( + k_fft[1:], Spec_fft.data, c="gray", label="FFT for Prior", linewidth=0.5 + ) + plt.plot( + k, weight, zorder=12, c="black", label="Fitted model to FFT", linewidth=0.5 + ) + plt.xlim(k[0], k[-1]) + # plt.show() # add pemnalty floor - weight = weight + weight.max()* 0.1 + weight = weight + weight.max() * 0.1 # peak normlize weight - weight = weight/weight.max() + weight = weight / weight.max() return weight, params + def make_xarray_from_dict(D, name, dims, coords): import xarray as xr + D_return = dict() - for xi,I in D.items(): - coords['x'] = xi - D_return[xi] = xr.DataArray(I, dims=dims, coords=coords , name=name) + for xi, I in D.items(): + coords["x"] = xi + D_return[xi] = xr.DataArray(I, dims=dims, coords=coords, name=name) return D_return + def define_weights(stancil, prior, x, y, dx, k, max_nfev, plot_flag=False): """ defines weights for the inversion, either from the data or from the prior, or a mix return weights normalized to 1, prior_pars used for the next iteration """ - if (type(prior[0]) is bool) and not prior[0] : # prior = (False, None), this is the first iteration + if (type(prior[0]) is bool) and not prior[ + 0 + ]: # prior = (False, None), this is the first iteration # fit function to data - weight, prior_pars = get_weights_from_data(x, y, dx, stancil, k, max_nfev, plot_flag=plot_flag, method='parametric') - #weight_name = "10 * $P_{init}$ from FFT" + weight, prior_pars = get_weights_from_data( + x, y, dx, stancil, k, max_nfev, plot_flag=plot_flag, method="parametric" + ) + # weight_name = "10 * $P_{init}$ from FFT" weight_name = "$P_{init}$ from FFT" - elif (type(prior) is tuple): # prior= (PSD_from_GFT, weight_used in inversion), this is all other first iteration + elif ( + type(prior) is tuple + ): # prior= (PSD_from_GFT, weight_used in inversion), this is all other first iteration # combine old and new weights weight = 0.2 * smooth_data_to_weight(prior[0]) + 0.8 * prior[1] - #weight_name = "10 * smth. $P_{i-1}$" + # weight_name = "10 * smth. $P_{i-1}$" weight_name = "smth. $P_{i-1}$" - prior_pars = {'alpha': None, 'amp': None, 'f_max': None, 'gamma':None} - else: # prior = weight, this is all other iterations + prior_pars = {"alpha": None, "amp": None, "f_max": None, "gamma": None} + else: # prior = weight, this is all other iterations weight = smooth_data_to_weight(prior) weight_name = "smth. 
from data" - prior_pars = {'alpha': None, 'amp': None, 'f_max': None, 'gamma':None} + prior_pars = {"alpha": None, "amp": None, "f_max": None, "gamma": None} if plot_flag: import matplotlib.pyplot as plt - plt.plot(k, weight, zorder=12, c='darkgreen', linewidth = 0.8,label = weight_name) + + plt.plot(k, weight, zorder=12, c="darkgreen", linewidth=0.8, label=weight_name) # peak normlize weights by std of data - weight = weight/ weight.std() + weight = weight / weight.std() return weight, prior_pars - + + class wavenumber_spectrogram_gFT(object): - def __init__(self, x, data, L, dx, wavenumber, data_error = None, ov=None): + def __init__(self, x, data, L, dx, wavenumber, data_error=None, ov=None): """ returns a wavenumber spectrogram with the resolution L-ov this uses Lombscargle @@ -177,21 +193,31 @@ def __init__(self, x, data, L, dx, wavenumber, data_error = None, ov=None): other arributes are in the .attr dict. """ - self.Lmeters = L - self.ov = int(L/2) if ov is None else ov #when not defined in create_chunk_boundaries then L/2 + self.Lmeters = L + self.ov = ( + int(L / 2) if ov is None else ov + ) # when not defined in create_chunk_boundaries then L/2 - self.x = x - self.dx = dx - self.data = data + self.x = x + self.dx = dx + self.data = data self.error = data_error if data_error is not None else None - self.Lpoints= int(self.Lmeters/self.dx) + self.Lpoints = int(self.Lmeters / self.dx) # create subsample k - self.k, self.dk = wavenumber, np.diff(wavenumber).mean() - - - def cal_spectrogram(self, x = None, data=None, error=None, name=None, xlims =None, max_nfev = None, map_func=None, plot_flag = False): - + self.k, self.dk = wavenumber, np.diff(wavenumber).mean() + + def cal_spectrogram( + self, + x=None, + data=None, + error=None, + name=None, + xlims=None, + max_nfev=None, + map_func=None, + plot_flag=False, + ): """ defines apply function and calculated all sub-sample sprectra using map @@ -214,23 +240,20 @@ def cal_spectrogram(self, x = None, data=None, error=None, name=None, xlims =Non import copy import pandas as pd - X = self.x if x is None else x # all x positions - DATA = self.data if data is None else data # all data points - ERR = self.error if error is None else error # all error for points - Lmeters, dk = self.Lmeters, self.dk + X = self.x if x is None else x # all x positions + DATA = self.data if data is None else data # all data points + ERR = self.error if error is None else error # all error for points + Lmeters, dk = self.Lmeters, self.dk Lpoints = self.Lpoints - Lpoints_full = int(Lmeters/self.dx) - #win = self.win - self.xlims = ( np.round(X.min()), X.max() ) if xlims is None else xlims + Lpoints_full = int(Lmeters / self.dx) + # win = self.win + self.xlims = (np.round(X.min()), X.max()) if xlims is None else xlims # init Lomb scargle object with noise as nummy data () - #dy_fake= np.random.randn(len(dy))*0.001 if self.dy is not None else None - #self.LS = LombScargle(X[0:L] , np.random.randn(L)*0.001, fit_mean=True) - - + # dy_fake= np.random.randn(len(dy))*0.001 if self.dy is not None else None + # self.LS = LombScargle(X[0:L] , np.random.randn(L)*0.001, fit_mean=True) def calc_gFT_apply(stancil, prior): - """ windows the data accoding to stencil and applies LS spectrogram returns: stancil center, spectrum for this stencil, number of datapoints in stancil @@ -238,238 +261,287 @@ def calc_gFT_apply(stancil, prior): from scipy.signal import detrend import matplotlib.pyplot as plt import time + ta = time.perf_counter() - #x = X[stancil[0]:stancil[-1]] - x_mask= 
(stancil[0] <= X) & (X <= stancil[-1]) + # x = X[stancil[0]:stancil[-1]] + x_mask = (stancil[0] <= X) & (X <= stancil[-1]) print(stancil[1]) x = X[x_mask] - if x.size/Lpoints < 0.1: # if there are not enough photos set results to nan - #return stancil[1], self.k*np.nan, np.fft.rfftfreq( int(self.Lpoints), d=self.dx)*np.nan, x.size - #return stancil[1], np.concatenate([self.k*np.nan , self.k*np.nan]), np.nan, np.nan, np.nan, x.size, False, False - return { 'stancil_center': stancil[1], - 'p_hat': np.concatenate([self.k*np.nan , self.k*np.nan]), - 'inverse_stats': np.nan, - 'y_model_grid': np.nan, - 'y_data_grid': np.nan, - 'x_size': x.size, - 'PSD': False, - 'weight': False, - 'spec_adjust': np.nan} - + if ( + x.size / Lpoints < 0.1 + ): # if there are not enough photos set results to nan + # return stancil[1], self.k*np.nan, np.fft.rfftfreq( int(self.Lpoints), d=self.dx)*np.nan, x.size + # return stancil[1], np.concatenate([self.k*np.nan , self.k*np.nan]), np.nan, np.nan, np.nan, x.size, False, False + return { + "stancil_center": stancil[1], + "p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]), + "inverse_stats": np.nan, + "y_model_grid": np.nan, + "y_data_grid": np.nan, + "x_size": x.size, + "PSD": False, + "weight": False, + "spec_adjust": np.nan, + } y = DATA[x_mask] y_var = y.var() FT = generalized_Fourier(x, y, self.k) - #H = FT.get_H() + # H = FT.get_H() if plot_flag: import matplotlib.pyplot as plt - plt.figure(figsize=(3.34, 1.8),dpi=300) + + plt.figure(figsize=(3.34, 1.8), dpi=300) # define weights. Weights are normalized to 1 - weight, prior_pars =define_weights(stancil, prior, x, y, self.dx, self.k , max_nfev, plot_flag=plot_flag) + weight, prior_pars = define_weights( + stancil, prior, x, y, self.dx, self.k, max_nfev, plot_flag=plot_flag + ) # rescale weights to 80% of the variance of the data - weight = weight * 0.8 * y_var + weight = weight * 0.8 * y_var # define error err = ERR[x_mask] if ERR is not None else 1 - - print( 'weights : ', time.perf_counter() - ta) + print("weights : ", time.perf_counter() - ta) ta = time.perf_counter() - FT.define_problem(weight, err) # 1st arg is Penalty, 2nd is error + FT.define_problem(weight, err) # 1st arg is Penalty, 2nd is error # solve problem: p_hat = FT.solve() - print( 'solve : ', time.perf_counter() - ta) + print("solve : ", time.perf_counter() - ta) ta = time.perf_counter() - x_pos = (np.round( (x - stancil[0])/ self.dx , 0) ).astype('int') - eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters/2 - y_model_grid = np.copy(eta) *np.nan - y_model_grid[x_pos] = FT.model() # returns dimensional model + x_pos = (np.round((x - stancil[0]) / self.dx, 0)).astype("int") + eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters / 2 + y_model_grid = np.copy(eta) * np.nan + y_model_grid[x_pos] = FT.model() # returns dimensional model # save data on this grid as well - y_data_grid = np.copy(eta) *np.nan + y_data_grid = np.copy(eta) * np.nan y_data_grid[x_pos] = y inverse_stats = FT.get_stats(self.dk, Lpoints_full, print_flag=True) # add fitting parameters of Prior to stats dict - for k,I in prior_pars.items(): + for k, I in prior_pars.items(): try: inverse_stats[k] = I.value except: inverse_stats[k] = np.nan - - print( 'stats : ', time.perf_counter() - ta) + + print("stats : ", time.perf_counter() - ta) # Z = complex_represenation(p_hat, FT.M, Lpoints ) # multiply with the standard deviation of the data to get dimensions right - PSD = power_from_model(p_hat, dk, self.k.size, x.size, Lpoints) 
#Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) - - if self.k.size*2 > x.size: - col = 'red' + PSD = power_from_model( + p_hat, dk, self.k.size, x.size, Lpoints + ) # Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) + + if self.k.size * 2 > x.size: + col = "red" else: - col= 'blue' + col = "blue" if plot_flag: - #PSD_nondim = power_from_model(p_hat , dk, self.k.size, x.size, Lpoints) #Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) - plt.plot(self.k, PSD, color=col , label= 'GFT fit', linewidth = 0.5) - plt.title( 'non-dim Spectral Segment Models, 2M='+ str(self.k.size*2) + ', N='+ str(x.size) +'\n@ $X_i=$'+str(round(stancil[1]/1e3, 1)) +'km' , loc='left', size=6) - plt.xlim(self.k[0],self.k[-1]) - plt.xlabel('Wavenumber k') - plt.ylabel('Power (m^2/k)') + # PSD_nondim = power_from_model(p_hat , dk, self.k.size, x.size, Lpoints) #Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) + plt.plot(self.k, PSD, color=col, label="GFT fit", linewidth=0.5) + plt.title( + "non-dim Spectral Segment Models, 2M=" + + str(self.k.size * 2) + + ", N=" + + str(x.size) + + "\n@ $X_i=$" + + str(round(stancil[1] / 1e3, 1)) + + "km", + loc="left", + size=6, + ) + plt.xlim(self.k[0], self.k[-1]) + plt.xlabel("Wavenumber k") + plt.ylabel("Power (m^2/k)") plt.legend() plt.show() - print('---------------------------------') + print("---------------------------------") # return dict with all relevant data return_dict = { - 'stancil_center': stancil[1], - 'p_hat': p_hat, - 'inverse_stats': inverse_stats, - 'y_model_grid': y_model_grid, - 'y_data_grid': y_data_grid, - 'x_size': x.size, - 'PSD': PSD, - 'weight': weight, - 'spec_adjust': inverse_stats['spec_adjust'] + "stancil_center": stancil[1], + "p_hat": p_hat, + "inverse_stats": inverse_stats, + "y_model_grid": y_model_grid, + "y_data_grid": y_data_grid, + "x_size": x.size, + "PSD": PSD, + "weight": weight, + "spec_adjust": inverse_stats["spec_adjust"], } return return_dict - # stancil[1], p_hat, - # inverse_stats, y_model_grid , - # y_data_grid, x.size, - # PSD, weight, + # stancil[1], p_hat, + # inverse_stats, y_model_grid , + # y_data_grid, x.size, + # PSD, weight, # inverse_stats['spec_adjust'] - # % derive L2 stancil - self.stancil_iter = spec.create_chunk_boundaries_unit_lengths(Lmeters, self.xlims, ov= self.ov, iter_flag=True) - #stancil_iter = create_chunk_boundaries_unit_lengths(L, ( np.round(X.min()), X.max() ), ov= self.ov, iter_flag=True) + self.stancil_iter = spec.create_chunk_boundaries_unit_lengths( + Lmeters, self.xlims, ov=self.ov, iter_flag=True + ) + # stancil_iter = create_chunk_boundaries_unit_lengths(L, ( np.round(X.min()), X.max() ), ov= self.ov, iter_flag=True) # apply func to all stancils - Spec_returns=list() + Spec_returns = list() # form: PSD_from_GFT, weight_used in inversion - prior= False, False + prior = False, False for ss in copy.copy(self.stancil_iter): - #print(ss) - #prior= False, False + # print(ss) + # prior= False, False # prior step - if prior[0] is False: # make NL fit of piors do not exist - print('1st step with NL-fit') + if prior[0] is False: # make NL fit of piors do not exist + print("1st step with NL-fit") I_return = calc_gFT_apply(ss, prior=prior) - prior = I_return['PSD'], I_return['weight'] #I_return[6], I_return[7] + prior = I_return["PSD"], I_return["weight"] # I_return[6], I_return[7] # 2nd step if prior[0] is False: - print('priors still false skip 2nd step') + print("priors still false skip 2nd step") else: - print('2nd step use set priors:', type(prior[0]), type(prior[0]) ) + print("2nd step use set priors:", type(prior[0]), 
type(prior[0])) I_return = calc_gFT_apply(ss, prior=prior) - prior = I_return['PSD'], I_return['weight'] # I_return[6], I_return[7] - - #print(I_return[6]) - Spec_returns.append( dict((k, I_return[k]) for k in ('stancil_center', 'p_hat', 'inverse_stats', 'y_model_grid', 'y_data_grid', 'x_size', 'spec_adjust', 'weight'))) - #Spec_returns.append( [I_return[0],I_return[1],I_return[2],I_return[3],I_return[4],I_return[5]] ) + prior = I_return["PSD"], I_return["weight"] # I_return[6], I_return[7] + + # print(I_return[6]) + Spec_returns.append( + dict( + (k, I_return[k]) + for k in ( + "stancil_center", + "p_hat", + "inverse_stats", + "y_model_grid", + "y_data_grid", + "x_size", + "spec_adjust", + "weight", + ) + ) + ) + # Spec_returns.append( [I_return[0],I_return[1],I_return[2],I_return[3],I_return[4],I_return[5]] ) # map_func = map if map_func is None else map_func # print(map_func) # Spec_returns = list(map_func( calc_gFT_apply, copy.copy(self.stancil_iter) )) # # linear version - #Spec_returns = list(map( calc_spectrum_and_field_apply, copy.copy(self.stancil_iter) )) + # Spec_returns = list(map( calc_spectrum_and_field_apply, copy.copy(self.stancil_iter) )) # unpack resutls of the mapping: - GFT_model = dict() - Z_model = dict() + GFT_model = dict() + Z_model = dict() - D_specs = dict() - D_specs_model = dict() + D_specs = dict() + D_specs_model = dict() - Pars = dict() - y_model = dict() - y_data = dict() - N_per_stancil = list() + Pars = dict() + y_model = dict() + y_data = dict() + N_per_stancil = list() Spec_adjust_per_stancil = list() - weights = dict() + weights = dict() for I in Spec_returns: - - x_center = I['stancil_center'] - spec_adjust = I['spec_adjust'] - GFT_model[x_center] = (I['p_hat'][0:self.k.size], I['p_hat'][self.k.size:]) - Z_model[x_center] = Z = complex_represenation(I['p_hat'], self.k.size, Lpoints ) - - PSD_data, PSD_model = Z_to_power_gFT(Z, self.dk, I['x_size'], Lpoints ) - D_specs[x_center] = PSD_data * spec_adjust - D_specs_model[x_center] = PSD_model * spec_adjust * 0 # set to zero because this data should not be used anymore - - Pars[x_center] = I['inverse_stats'] - y_model[x_center] = I['y_model_grid'] - y_data[x_center] = I['y_data_grid'] - - weights[x_center] = I['weight'] - - N_per_stancil.append(I['x_size']) + x_center = I["stancil_center"] + spec_adjust = I["spec_adjust"] + GFT_model[x_center] = ( + I["p_hat"][0 : self.k.size], + I["p_hat"][self.k.size :], + ) + Z_model[x_center] = Z = complex_represenation( + I["p_hat"], self.k.size, Lpoints + ) + + PSD_data, PSD_model = Z_to_power_gFT(Z, self.dk, I["x_size"], Lpoints) + D_specs[x_center] = PSD_data * spec_adjust + D_specs_model[x_center] = ( + PSD_model * spec_adjust * 0 + ) # set to zero because this data should not be used anymore + + Pars[x_center] = I["inverse_stats"] + y_model[x_center] = I["y_model_grid"] + y_data[x_center] = I["y_data_grid"] + + weights[x_center] = I["weight"] + + N_per_stancil.append(I["x_size"]) Spec_adjust_per_stancil.append(spec_adjust) - print("# of x-coordinates" + str(len(Spec_returns)) ) + print("# of x-coordinates" + str(len(Spec_returns))) - self.N_per_stancil = N_per_stancil - chunk_positions = np.array(list(D_specs.keys())) - self.N_stancils = len(chunk_positions) # number of spectral realizatiobs + self.N_per_stancil = N_per_stancil + chunk_positions = np.array(list(D_specs.keys())) + self.N_stancils = len(chunk_positions) # number of spectral realizatiobs # repack data, create xarray # 1st LS spectal estimates - # G_power_data = dict() # for xi,I in D_specs.items(): 
# G_power_data[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_PSD_data') - G_power_data = make_xarray_from_dict(D_specs, 'gFT_PSD_data', ['k'], {'k': self.k} ) - G_power_data = xr.concat(G_power_data.values(), dim='x').T#.to_dataset() - + G_power_data = make_xarray_from_dict( + D_specs, "gFT_PSD_data", ["k"], {"k": self.k} + ) + G_power_data = xr.concat(G_power_data.values(), dim="x").T # .to_dataset() # G_power_model = dict() # for xi,I in D_specs_model.items(): # G_power_model[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_PSD_model') - G_power_model = make_xarray_from_dict(D_specs_model, 'gFT_PSD_model', ['k'], {'k': self.k} ) - G_power_model = xr.concat(G_power_model.values(), dim='x').T#.to_dataset() - - self.G = G_power_model - self.G.name = 'gFT_PSD_model' + G_power_model = make_xarray_from_dict( + D_specs_model, "gFT_PSD_model", ["k"], {"k": self.k} + ) + G_power_model = xr.concat(G_power_model.values(), dim="x").T # .to_dataset() + + self.G = G_power_model + self.G.name = "gFT_PSD_model" - #2nd FFT(Y_model) + # 2nd FFT(Y_model) # G_model_Z =dict() # for xi,I in Z_model.items(): # # if I.size < Y_model_k_fft.size: # # I = np.insert(I, -1, I[-1]) # G_model_Z[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='Z_hat') - G_model_Z = make_xarray_from_dict(Z_model, 'Z_hat', ['k'], {'k': self.k} ) - G_model_Z = xr.concat(G_model_Z.values(), dim='x').T#.to_dataset() + G_model_Z = make_xarray_from_dict(Z_model, "Z_hat", ["k"], {"k": self.k}) + G_model_Z = xr.concat(G_model_Z.values(), dim="x").T # .to_dataset() # 3rd - GFT_model_coeff_A =dict() - GFT_model_coeff_B =dict() - for xi,I in GFT_model.items(): + GFT_model_coeff_A = dict() + GFT_model_coeff_B = dict() + for xi, I in GFT_model.items(): # if I.size < Y_model_k_fft.size: # I = np.insert(I, -1, I[-1]) - GFT_model_coeff_A[xi] = xr.DataArray(I[0], dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_cos_coeff') - GFT_model_coeff_B[xi] = xr.DataArray(I[1], dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_sin_coeff') - - GFT_model_coeff_A = xr.concat(GFT_model_coeff_A.values(), dim='x').T#.to_dataset() - GFT_model_coeff_B = xr.concat(GFT_model_coeff_B.values(), dim='x').T#.to_dataset() + GFT_model_coeff_A[xi] = xr.DataArray( + I[0], dims=["k"], coords={"k": self.k, "x": xi}, name="gFT_cos_coeff" + ) + GFT_model_coeff_B[xi] = xr.DataArray( + I[1], dims=["k"], coords={"k": self.k, "x": xi}, name="gFT_sin_coeff" + ) + + GFT_model_coeff_A = xr.concat( + GFT_model_coeff_A.values(), dim="x" + ).T # .to_dataset() + GFT_model_coeff_B = xr.concat( + GFT_model_coeff_B.values(), dim="x" + ).T # .to_dataset() # add weights to the data - weights_k = make_xarray_from_dict(weights , 'weight' , ['k'], {'k': self.k} ) - weights_k = xr.concat(weights_k.values() , dim='x').T#.to_dataset() + weights_k = make_xarray_from_dict(weights, "weight", ["k"], {"k": self.k}) + weights_k = xr.concat(weights_k.values(), dim="x").T # .to_dataset() # 4th: model in real space # y_model_eta =dict() @@ -479,88 +551,123 @@ def calc_gFT_apply(stancil, prior): # y_model_eta[xi] = xr.DataArray(y_model[xi], dims=['eta'], coords={'eta': eta, 'x': xi } , name="y_model") # y_data_eta[xi] = xr.DataArray(y_data[xi] , dims=['eta'], coords={'eta': eta, 'x': xi } , name="y_data") - eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters/2 - y_model_eta = make_xarray_from_dict(y_model, 'y_model', ['eta'], {'eta': eta} ) - y_data_eta = make_xarray_from_dict(y_data , 'y_data' , ['eta'], 
{'eta': eta} ) - - y_model_eta = xr.concat(y_model_eta.values(), dim='x').T#.to_dataset() - y_data_eta = xr.concat(y_data_eta.values() , dim='x').T#.to_dataset() + eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters / 2 + y_model_eta = make_xarray_from_dict(y_model, "y_model", ["eta"], {"eta": eta}) + y_data_eta = make_xarray_from_dict(y_data, "y_data", ["eta"], {"eta": eta}) + y_model_eta = xr.concat(y_model_eta.values(), dim="x").T # .to_dataset() + y_data_eta = xr.concat(y_data_eta.values(), dim="x").T # .to_dataset() # merge wavenumber datasets - self.GG = xr.merge([G_power_data, G_power_model, G_model_Z, GFT_model_coeff_A, GFT_model_coeff_B, weights_k]) - self.GG.attrs['ov'] = self.ov - self.GG.attrs['L'] = self.Lmeters - self.GG.attrs['Lpoints'] = self.Lpoints - self.GG.coords['N_per_stancil'] = ( ('x'), N_per_stancil) - self.GG.coords['spec_adjust'] = ( ('x'), Spec_adjust_per_stancil) - - #self.GG.expand_dims(dim='eta') + self.GG = xr.merge( + [ + G_power_data, + G_power_model, + G_model_Z, + GFT_model_coeff_A, + GFT_model_coeff_B, + weights_k, + ] + ) + self.GG.attrs["ov"] = self.ov + self.GG.attrs["L"] = self.Lmeters + self.GG.attrs["Lpoints"] = self.Lpoints + self.GG.coords["N_per_stancil"] = (("x"), N_per_stancil) + self.GG.coords["spec_adjust"] = (("x"), Spec_adjust_per_stancil) + + # self.GG.expand_dims(dim='eta') # eta = np.arange(0, self.L + self.dx, self.dx) - self.L/2 # self.GG.coords['eta'] = ( ('eta'), eta ) # #self.GG['win'] = ( ('eta'), np.insert(self.win, -1, self.win[-1])) - # create dataframe with fitted parameters and derive y_model and errors # reduce to valid values - PP2= dict() + PP2 = dict() for k, I in Pars.items(): if I is not np.nan: - PP2[k] =I - #print(Pars) - #print(PP2) + PP2[k] = I + # print(Pars) + # print(PP2) keys = Pars[next(iter(PP2))].keys() - keys_DF = list(set(keys) - set(['model_error_k', 'model_error_x'])) - params_dataframe = pd.DataFrame(index =keys_DF) - model_error_k_cos =dict() - model_error_k_sin =dict() - model_error_x =dict() - for xi,I in Pars.items(): - + keys_DF = list(set(keys) - set(["model_error_k", "model_error_x"])) + params_dataframe = pd.DataFrame(index=keys_DF) + model_error_k_cos = dict() + model_error_k_sin = dict() + model_error_x = dict() + for xi, I in Pars.items(): if I is not np.nan: - params_dataframe[xi] = [I[ki] for ki in keys_DF] - model_error_k_cos[xi] = xr.DataArray(I['model_error_k'][0:self.k.size], dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_cos') - model_error_k_sin[xi] = xr.DataArray(I['model_error_k'][self.k.size:], dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_sin') - - sta, ste = xi- self.Lmeters/2, xi+self.Lmeters/2 - #x_mask= (sta <= X) & (X <= ste) - x_pos = (np.round( (X[(sta <= X) & (X <= ste)] - sta)/ self.dx ) ).astype('int') - x_err = np.copy(eta) *np.nan + model_error_k_cos[xi] = xr.DataArray( + I["model_error_k"][0 : self.k.size], + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_cos", + ) + model_error_k_sin[xi] = xr.DataArray( + I["model_error_k"][self.k.size :], + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_sin", + ) + + sta, ste = xi - self.Lmeters / 2, xi + self.Lmeters / 2 + # x_mask= (sta <= X) & (X <= ste) + x_pos = (np.round((X[(sta <= X) & (X <= ste)] - sta) / self.dx)).astype( + "int" + ) + x_err = np.copy(eta) * np.nan # check sizes and adjust if necessary. 
- if x_pos.size > I['model_error_x'].size: - x_pos = x_pos[ 0 : I['model_error_x'].size ] - print('adjust x') - elif x_pos.size < I['model_error_x'].size: - I['model_error_x'] = I['model_error_x'][0:-1]# np.append(I['model_error_x'], I['model_error_x'][-1]) - print('adjust y') - - #print(x_pos.size , I['model_error_x'].size) - x_err[x_pos] = I['model_error_x'] - model_error_x[xi] = xr.DataArray(x_err, dims=['eta'], coords={'eta': eta, 'x': xi } , name='model_error_x') + if x_pos.size > I["model_error_x"].size: + x_pos = x_pos[0 : I["model_error_x"].size] + print("adjust x") + elif x_pos.size < I["model_error_x"].size: + I["model_error_x"] = I["model_error_x"][ + 0:-1 + ] # np.append(I['model_error_x'], I['model_error_x'][-1]) + print("adjust y") + + # print(x_pos.size , I['model_error_x'].size) + x_err[x_pos] = I["model_error_x"] + model_error_x[xi] = xr.DataArray( + x_err, + dims=["eta"], + coords={"eta": eta, "x": xi}, + name="model_error_x", + ) else: - model_error_k_cos[xi] = xr.DataArray(np.zeros(self.k.size)*np.nan, dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_cos') - model_error_k_sin[xi] = xr.DataArray(np.zeros(self.k.size)*np.nan, dims=['k'], coords={'k': self.k, 'x': xi } , name='model_error_k_sin') - - model_error_x[xi] = xr.DataArray(np.copy(eta) *np.nan, dims=['eta'], coords={'eta': eta, 'x': xi } , name='model_error_x') - - - self.GG['model_error_k_cos'] = xr.concat(model_error_k_cos.values(), dim='x').T - self.GG['model_error_k_sin'] = xr.concat(model_error_k_sin.values(), dim='x').T - - model_error_x = xr.concat(model_error_x.values(), dim='x').T - GG_x= xr.merge([y_model_eta, y_data_eta, model_error_x]) - #model_error_x + model_error_k_cos[xi] = xr.DataArray( + np.zeros(self.k.size) * np.nan, + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_cos", + ) + model_error_k_sin[xi] = xr.DataArray( + np.zeros(self.k.size) * np.nan, + dims=["k"], + coords={"k": self.k, "x": xi}, + name="model_error_k_sin", + ) + + model_error_x[xi] = xr.DataArray( + np.copy(eta) * np.nan, + dims=["eta"], + coords={"eta": eta, "x": xi}, + name="model_error_x", + ) + + self.GG["model_error_k_cos"] = xr.concat(model_error_k_cos.values(), dim="x").T + self.GG["model_error_k_sin"] = xr.concat(model_error_k_sin.values(), dim="x").T + + model_error_x = xr.concat(model_error_x.values(), dim="x").T + GG_x = xr.merge([y_model_eta, y_data_eta, model_error_x]) + # model_error_x return self.GG, GG_x, params_dataframe - - def calc_var(self): - Gmean = np.nanmean(self.G, 1) infmask = np.isinf(Gmean) @@ -569,57 +676,60 @@ def calc_var(self): # def parceval(self, add_attrs=True ): # return wavenumber_spectrogram.parceval(self, add_attrs= add_attrs ) - def parceval(self, add_attrs=True, weight_data=False ): + def parceval(self, add_attrs=True, weight_data=False): "test Parceval theorem" import copy + DATA = self.data L = self.Lmeters X = self.x # derive mean variances of stancils - #stancil_iter = create_chunk_boundaries_unit_lengths(L, self.xlims, ov= self.ov ) + # stancil_iter = create_chunk_boundaries_unit_lengths(L, self.xlims, ov= self.ov ) def get_stancil_var_apply(stancil): from scipy.signal import detrend + "returns the variance of yy for stancil" - x_mask= (stancil[0] < X) & (X <= stancil[-1]) + x_mask = (stancil[0] < X) & (X <= stancil[-1]) idata = DATA[x_mask] if len(idata) < 1: return stancil[1], np.nan, len(idata) idata = detrend(idata) # weight data - x_pos = (np.round( (X[x_mask] - stancil[0])/ 10.0 , 0) ).astype('int') + x_pos = (np.round((X[x_mask] - 
stancil[0]) / 10.0, 0)).astype("int") if weight_data: window = self.win[x_pos] - idata = idata * window * np.sqrt( np.var(idata) / np.var(( idata* window) ) ) + idata = ( + idata * window * np.sqrt(np.var(idata) / np.var((idata * window))) + ) return stancil[1], idata.var(), len(idata) - D_vars = list(map(get_stancil_var_apply, copy.copy(self.stancil_iter) )) + D_vars = list(map(get_stancil_var_apply, copy.copy(self.stancil_iter))) - stancil_vars, Nphotons =list(), 0 + stancil_vars, Nphotons = list(), 0 for I in D_vars: - stancil_vars.append(I[1] * I[2]) - Nphotons += I[2] + stancil_vars.append(I[1] * I[2]) + Nphotons += I[2] - stancil_weighted_variance = np.nansum(np.array(stancil_vars))/Nphotons + stancil_weighted_variance = np.nansum(np.array(stancil_vars)) / Nphotons - print('Parcevals Theorem:') - print('variance of timeseries: ', DATA.var()) - print('mean variance of stancils: ', stancil_weighted_variance ) - #print('variance of weighted timeseries: ',self.phi.var() ) - #self.calc_var(self) - print('variance of the optimzed windowed LS Spectrum: ', self.calc_var()) + print("Parcevals Theorem:") + print("variance of timeseries: ", DATA.var()) + print("mean variance of stancils: ", stancil_weighted_variance) + # print('variance of weighted timeseries: ',self.phi.var() ) + # self.calc_var(self) + print("variance of the optimzed windowed LS Spectrum: ", self.calc_var()) if add_attrs: - self.G.attrs['variance_unweighted_data'] = DATA.var() - self.G.attrs['mean_variance_stancils'] = np.nanmean(np.array(stancil_vars) ) - self.G.attrs['mean_variance_LS_pwelch_spectrum'] = self.calc_var() - - - def mean_spectral_error(self, mask=None, confidence = 0.95 ): - return wavenumber_spectrogram.mean_spectral_error(self, mask=mask, confidence= confidence ) - + self.G.attrs["variance_unweighted_data"] = DATA.var() + self.G.attrs["mean_variance_stancils"] = np.nanmean(np.array(stancil_vars)) + self.G.attrs["mean_variance_LS_pwelch_spectrum"] = self.calc_var() + def mean_spectral_error(self, mask=None, confidence=0.95): + return wavenumber_spectrogram.mean_spectral_error( + self, mask=mask, confidence=confidence + ) def complex_represenation(p_hat, M, N_x_full): @@ -635,13 +745,13 @@ def complex_represenation(p_hat, M, N_x_full): this returns a power spectral density with the same variance as the data without gaps. """ - Z = p_hat[0:M] - p_hat[M:] *1j - Z = Z * (N_x_full/2+1) # this + Z = p_hat[0:M] - p_hat[M:] * 1j + Z = Z * (N_x_full / 2 + 1) # this return Z -def Z_to_power_gFT(Z, dk, N_x, N_x_full): - """ compute the 1d Power spectral density of a field Z +def Z_to_power_gFT(Z, dk, N_x, N_x_full): + """compute the 1d Power spectral density of a field Z inputs: Z complex fourier coefficients, output of .complex_represenation method dk delta wavenumber asssuming Z is on regular grid @@ -654,13 +764,13 @@ def Z_to_power_gFT(Z, dk, N_x, N_x_full): prefer spec_complete """ - spec = 2.*(Z*Z.conj()).real + spec = 2.0 * (Z * Z.conj()).real - neven = True if (N_x_full%2) else False + neven = True if (N_x_full % 2) else False # the zeroth frequency should be counted only once - spec[0] = spec[0]/2. + spec[0] = spec[0] / 2.0 if neven: - spec[-1] = spec[-1]/2. 
+ spec[-1] = spec[-1] / 2.0 # spectral density respesenting the incomplete data ( [p_hat]^2 / dk) spec_incomplete = spec / dk / N_x / N_x_full @@ -669,8 +779,9 @@ def Z_to_power_gFT(Z, dk, N_x, N_x_full): return spec_incomplete, spec_complete -def power_from_model(p_hat, dk, M, N_x, N_x_full): - """ compute the 1d Power spectral density from the model coefficients in p_hat + +def power_from_model(p_hat, dk, M, N_x, N_x_full): + """compute the 1d Power spectral density from the model coefficients in p_hat p_hat is the model coefficient matrix M size of the model vector/2, size of k @@ -683,46 +794,44 @@ def power_from_model(p_hat, dk, M, N_x, N_x_full): """ Z = complex_represenation(p_hat, M, N_x_full) - spec, _ = Z_to_power_gFT(Z, dk, N_x, N_x_full) # use spec_incomplete + spec, _ = Z_to_power_gFT(Z, dk, N_x, N_x_full) # use spec_incomplete # spectral density respesenting the incomplete data return spec - class generalized_Fourier(object): def __init__(self, x, ydata, k): - """ non_dimensionalize (bool, default=True) if True, then the data and R_data_uncertainty is non-dimensionalized by the std of the data """ import numpy as np from numpy import linalg - self.x, self.ydata, self.k = x, ydata, k - self.M = self.k.size # number of wavenumbers - self.N = self.x.size# number of datapoints + self.M = self.k.size # number of wavenumbers + self.N = self.x.size # number of datapoints # if self.non_dimensionalize: # self.ydata_star = (self.ydata - self.ydata_mean)/self.ydata_std # else: - #self.ydata_star = self.ydata + # self.ydata_star = self.ydata if ydata is not None: - - self.ydata_var = self.ydata.var() - self.ydata_mean = self.ydata.mean() + self.ydata_var = self.ydata.var() + self.ydata_mean = self.ydata.mean() # test if the data is real, not nan and not inf - assert np.isrealobj(self.ydata), 'data is not real' - assert np.isfinite(self.ydata).all(), 'data is not finite' - assert np.isnan(self.ydata).all() == False, 'data is not nan' + assert np.isrealobj(self.ydata), "data is not real" + assert np.isfinite(self.ydata).all(), "data is not finite" + assert np.isnan(self.ydata).all() == False, "data is not nan" # data matrix - def get_H(self, xx = None): + def get_H(self, xx=None): xx = self.x if xx is None else xx - self.H = np.vstack([ np.cos(np.outer(xx, self.k)).T , np.sin(np.outer(xx, self.k)).T ] ).T - return self.H + self.H = np.vstack( + [np.cos(np.outer(xx, self.k)).T, np.sin(np.outer(xx, self.k)).T] + ).T + return self.H def define_problem(self, P_weight, R_data_uncertainty): """ @@ -730,22 +839,23 @@ def define_problem(self, P_weight, R_data_uncertainty): if P = 0, then the corresponding wavenumber is not penalized, not weighted if P != 0, then the corresponding wavenumber is penalized, i.e. it is put more weight on it. data_uncertainty (observed) uncertainy of the datain units of the data , can be vector of length N, or scaler - """ + """ - self.H = self.get_H() - #self.P = np.diag(1/penalties) # penalty 2M x 2M - #self.R = np.diag( data_uncertainty) #Noise Prior N x N - self.P_1d = np.concatenate([ P_weight , P_weight ]) # these are weights again .. - self.R_1d = R_data_uncertainty + self.H = self.get_H() + # self.P = np.diag(1/penalties) # penalty 2M x 2M + # self.R = np.diag( data_uncertainty) #Noise Prior N x N + self.P_1d = np.concatenate([P_weight, P_weight]) # these are weights again .. 
+ self.R_1d = R_data_uncertainty def solve(self): from numpy import linalg + inv = linalg.inv """ solves the linear inverse problem, return hessian and p_hat self.p_hat = is also non-dimensional """ - + # standard inversion # H = self.H # P = self.P @@ -764,10 +874,10 @@ def solve(self): R_1d = self.R_1d y = self.ydata - H_T_R_inv = H.T * (1/R_1d) - Hess = (H_T_R_inv @ H ) + np.diag(1/P_1d) - Hess_inv = inv(Hess) - p_hat = Hess_inv @ H_T_R_inv @ y + H_T_R_inv = H.T * (1 / R_1d) + Hess = (H_T_R_inv @ H) + np.diag(1 / P_1d) + Hess_inv = inv(Hess) + p_hat = Hess_inv @ H_T_R_inv @ y self.Hess, self.Hess_inv, self.p_hat = Hess, Hess_inv, p_hat del H_T_R_inv @@ -782,15 +892,15 @@ def solve(self): # if 'p_hat' not in self.__dict__: # raise ValueError('p_hat does not exist, please invert for model first') # return self.model_nondimensional() * self.ydata_std + self.ydata_mean - + def model(self): - " returns the model dimensional units" - if 'p_hat' not in self.__dict__: - raise ValueError('p_hat does not exist, please invert for model first') + "returns the model dimensional units" + if "p_hat" not in self.__dict__: + raise ValueError("p_hat does not exist, please invert for model first") return (self.p_hat * self.H).sum(1) def parceval(self, dk, Nx_full): - """ compute the 1d Power spectral density from the model coefficients in p_hat + """compute the 1d Power spectral density from the model coefficients in p_hat p_hat is the model coefficient matrix M size of the model vector/2, size of k @@ -805,78 +915,83 @@ def parceval(self, dk, Nx_full): p_hat = self.p_hat M = self.M Nx = self.N - - Z = complex_represenation(p_hat, M, Nx_full ) - spec_incomplete, spec_complete = Z_to_power_gFT(Z, dk, Nx, Nx_full) # use spec_incomplete + + Z = complex_represenation(p_hat, M, Nx_full) + spec_incomplete, spec_complete = Z_to_power_gFT( + Z, dk, Nx, Nx_full + ) # use spec_incomplete var_spec_incomplete = np.trapz(spec_incomplete, x=self.k) var_spec_complete = np.trapz(spec_complete, x=self.k) # calculate adjustment factor forspectral density - model_var =self.model().var() - spec_adjust = max( var_spec_incomplete / model_var, model_var / var_spec_incomplete ) + model_var = self.model().var() + spec_adjust = max( + var_spec_incomplete / model_var, model_var / var_spec_incomplete + ) pars = { - 'model_var': model_var, - 'var_spec_incomplete': var_spec_incomplete, - 'var_spec_complete': var_spec_complete, - 'spec_adjust': spec_adjust - } + "model_var": model_var, + "var_spec_incomplete": var_spec_incomplete, + "var_spec_complete": var_spec_complete, + "spec_adjust": spec_adjust, + } # spectral density respesenting the incomplete data return pars - - def get_stats(self, dk, Nx_full, print_flag=False): - #model_error_k = np.diag(self.Hess_inv) - #model_error_real = ((self.H**2) @ self.Hess_inv).sum(1) + def get_stats(self, dk, Nx_full, print_flag=False): + # model_error_k = np.diag(self.Hess_inv) + # model_error_real = ((self.H**2) @ self.Hess_inv).sum(1) residual = self.ydata - self.model() Lmeters = self.x[-1] - self.x[0] pars = { - 'data_var': self.ydata.var(), - 'model_var': self.model().var(), - 'residual_var' : residual.var(), - #'normalized_residual' : residual.var() /self.R_1d.mean(), - 'model_error_k' : np.diag(self.Hess_inv), - 'model_error_x' : ((self.H**2) @ self.Hess_inv).sum(1), - 'var_sum' : self.ydata.var() - self.model().var() -residual.var() + "data_var": self.ydata.var(), + "model_var": self.model().var(), + "residual_var": residual.var(), + #'normalized_residual' : residual.var() 
/self.R_1d.mean(), + "model_error_k": np.diag(self.Hess_inv), + "model_error_x": ((self.H**2) @ self.Hess_inv).sum(1), + "var_sum": self.ydata.var() - self.model().var() - residual.var(), } pars2 = self.parceval(dk, Nx_full) - for k,I in pars2.items(): + for k, I in pars2.items(): pars[k] = I if print_flag: - for ki in ['data_var', 'model_var', 'residual_var','var_sum', 'var_spec_incomplete', 'var_spec_complete', 'spec_adjust']: - print( ki.ljust(20) + str(pars[ki]) ) + for ki in [ + "data_var", + "model_var", + "residual_var", + "var_sum", + "var_spec_incomplete", + "var_spec_complete", + "spec_adjust", + ]: + print(ki.ljust(20) + str(pars[ki])) return pars - class get_prior_spec(object): def __init__(self, freq, data): - - - """ - - """ + """ """ import numpy as np import lmfit as LM - self.LM =LM + + self.LM = LM self.data = data self.freq = freq - if self.freq[0] ==0: - - self.freq_cut_flag= True + if self.freq[0] == 0: + self.freq_cut_flag = True self.freq, self.data = self.freq[1:], self.data[1:] else: - self.freq_cut_flag= False + self.freq_cut_flag = False def set_parameters(self, flim=None): - """ sets parameters fpr optimization setf.freq freq grid used for optimization @@ -888,95 +1003,101 @@ def set_parameters(self, flim=None): """ import numpy as np - params = self.LM.Parameters() - def get_peak_pos(y, smooth =30): + params = self.LM.Parameters() - yy =self.runningmean(y, smooth, tailcopy=False) + def get_peak_pos(y, smooth=30): + yy = self.runningmean(y, smooth, tailcopy=False) yy[np.isnan(yy)] = 0 return yy.argmax() if flim is not None: - iflim= abs(self.freq - flim).argmin() - f_max = self.freq[0:iflim][ get_peak_pos(abs(self.data[0:iflim]), 30) ] + iflim = abs(self.freq - flim).argmin() + f_max = self.freq[0:iflim][get_peak_pos(abs(self.data[0:iflim]), 30)] else: - f_max = self.freq[ get_peak_pos(abs(self.data), 30) ] + f_max = self.freq[get_peak_pos(abs(self.data), 30)] self.f_max = f_max # p_smothed = self.runningmean(np.abs(self.Z ), 20, tailcopy=True) # f_max = self.freq[p_smothed[~np.isnan(p_smothed)].argmax()] - params.add('f_max', f_max , min=f_max*0.2, max=f_max*1.5, vary=True) - params.add('amp', 0.05 , min=.0001, max=.1, vary=True) - params.add('gamma', 1 , min=1, max=3.3, vary=False) - params.add('alpha', 1 , min=0, max= 0.95 * np.pi /2, vary=True) + params.add("f_max", f_max, min=f_max * 0.2, max=f_max * 1.5, vary=True) + params.add("amp", 0.05, min=0.0001, max=0.1, vary=True) + params.add("gamma", 1, min=1, max=3.3, vary=False) + params.add("alpha", 1, min=0, max=0.95 * np.pi / 2, vary=True) self.params = params return params def model_func(self, f, params): - return self.non_dim_spec_model(f, params['f_max'].value, params['amp'].value, params['gamma'].value) + return self.non_dim_spec_model( + f, params["f_max"].value, params["amp"].value, params["gamma"].value + ) - def non_dim_spec_model(self, f, f_max, amp, gamma=1, angle_rad = 0): + def non_dim_spec_model(self, f, f_max, amp, gamma=1, angle_rad=0): import icesat2_tracks.local_modules.JONSWAP_gamma as spectal_models - U= 20 # results are incensitive to U + + U = 20 # results are incensitive to U f_true = f * np.cos(angle_rad) - model= spectal_models.JONSWAP_default_alt(f_true, f_max, 20 , gamma=gamma) - model = amp * model/np.nanmean(model) - model[np.isnan(model)] =0 + model = spectal_models.JONSWAP_default_alt(f_true, f_max, 20, gamma=gamma) + model = amp * model / np.nanmean(model) + model[np.isnan(model)] = 0 return model - def objective_func(self,params, data, model_func, freq, weight= None): - 
f_weight = np.linspace(1, .1, freq.size) + def objective_func(self, params, data, model_func, freq, weight=None): + f_weight = np.linspace(1, 0.1, freq.size) model = model_func(freq, params) - cost =( abs(data - model) * f_weight / data.std() )**2 - return cost.sum() + 4 *np.abs(self.f_max -params['f_max'])**2 / self.f_max - - + cost = (abs(data - model) * f_weight / data.std()) ** 2 + return cost.sum() + 4 * np.abs(self.f_max - params["f_max"]) ** 2 / self.f_max def test_ojective_func(self, model_func): return self.objective_func(self.params, self.data, model_func, self.freq) - - def optimize(self, fitting_args= None , method='dual_annealing', max_nfev=None): - - + def optimize(self, fitting_args=None, method="dual_annealing", max_nfev=None): if fitting_args is None: fitting_args = (self.data, self.model_func, self.freq) - #fit_kws= {'maxfun': 1e7} - fit_kws= {'maxfun': 1e5} + # fit_kws= {'maxfun': 1e7} + fit_kws = {"maxfun": 1e5} self.weight_func = fitting_args[1] - self.fitter = self.LM.minimize(self.objective_func, self.params, args=fitting_args, method=method, max_nfev=max_nfev, **fit_kws) + self.fitter = self.LM.minimize( + self.objective_func, + self.params, + args=fitting_args, + method=method, + max_nfev=max_nfev, + **fit_kws, + ) return self.fitter def plot_data(self): import matplotlib.pyplot as plt - plt.plot(self.freq, self.data, 'k') + + plt.plot(self.freq, self.data, "k") def plot_model(self, pars): import matplotlib.pyplot as plt - plt.plot(self.freq, self.model_func(self.freq, pars), 'b--' ) + + plt.plot(self.freq, self.model_func(self.freq, pars), "b--") def runningmean(self, var, m, tailcopy=False): - m=int(m) - s =var.shape - if s[0] <= 2*m: - print('0 Dimension is smaller then averaging length') + m = int(m) + s = var.shape + if s[0] <= 2 * m: + print("0 Dimension is smaller then averaging length") return - rr=np.asarray(var)*np.nan - #print(type(rr)) - var_range=np.arange(m,int(s[0])-m-1,1) - for i in var_range[np.isfinite(var[m:int(s[0])-m-1])]: - #rm.append(var[i-m:i+m].mean()) - rr[int(i)]=np.nanmean(var[i-m:i+m]) + rr = np.asarray(var) * np.nan + # print(type(rr)) + var_range = np.arange(m, int(s[0]) - m - 1, 1) + for i in var_range[np.isfinite(var[m : int(s[0]) - m - 1])]: + # rm.append(var[i-m:i+m].mean()) + rr[int(i)] = np.nanmean(var[i - m : i + m]) if tailcopy: - rr[0:m]=rr[m+1] - rr[-m-1:-1]=rr[-m-2] + rr[0:m] = rr[m + 1] + rr[-m - 1 : -1] = rr[-m - 2] return rr - - def create_weight(self,freq=None, plot_flag=True, flim= None, max_nfev=None): + def create_weight(self, freq=None, plot_flag=True, flim=None, max_nfev=None): """ this returns a weight function that can be used for the Least square fitting. 
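# A hypothetical usage sketch for get_prior_spec / create_weight as
# defined in this class: fit the JONSWAP-shaped model to a raw spectrum
# and evaluate it as a weight on the same grid. `k` and `raw_spec` are
# illustrative stand-ins, and this assumes icesat2_tracks and lmfit are
# importable; none of this is prescribed by the script itself.
import numpy as np

k = np.linspace(0, 0.2, 200)                     # wavenumber grid (k=0 is stripped internally)
raw_spec = np.exp(-((k - 0.05) / 0.02) ** 2)     # stand-in for a measured spectrum

prior = get_prior_spec(k, raw_spec)
weight = prior.create_weight(freq=None, plot_flag=False, max_nfev=100)
# `weight` carries the JONSWAP-like shape of the fit and can serve as
# P_weight in generalized_Fourier.define_problem.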
""" @@ -985,7 +1106,7 @@ def create_weight(self,freq=None, plot_flag=True, flim= None, max_nfev=None): ff = self.freq else: ff = freq - if 'params' not in self.__dict__: + if "params" not in self.__dict__: self.set_parameters(flim=flim) self.optimize(max_nfev=max_nfev) @@ -995,7 +1116,6 @@ def create_weight(self,freq=None, plot_flag=True, flim= None, max_nfev=None): self.plot_data() self.plot_model(self.fitter.params) - result = self.model_func(ff, self.fitter.params) if self.freq_cut_flag and freq is None: result = np.insert(result, 0, 0) diff --git a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py index 6d6f5754..94deba31 100644 --- a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py +++ b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py @@ -1,6 +1,4 @@ -# %% import os, sys -#execfile(os.environ['PYTHONSTARTUP']) """ This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. @@ -11,502 +9,511 @@ mconfig, xr, color_schemes, - font_for_pres, plt, np, - font_for_print + font_for_print, ) -#%matplotlib inline - -import icesat2_tracks.ICEsat2_SI_tools.convert_GPS_time as cGPS -import h5py import icesat2_tracks.ICEsat2_SI_tools.io as io import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec import time -import imp -import copy -import icesat2_tracks.ICEsat2_SI_tools.spicke_remover -import datetime from matplotlib.gridspec import GridSpec import icesat2_tracks.ICEsat2_SI_tools.generalized_FT as gFT from scipy.ndimage.measurements import label import icesat2_tracks.local_modules.m_tools_ph3 as MT from icesat2_tracks.local_modules import m_general_ph3 as M -#import s3fs -# %% -track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment -#track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190208152826_06440210_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190213133330_07190212_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190207002436_06190212_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190206022433_06050212_004_01', 'SH_batch02', False - -#track_name, batch_key, test_flag = 'SH_20190101_00570212', 'SH_batch04', True -#track_name, batch_key, test_flag = 'SH_20190219_08070210', 'SH_batchminimal', True - +track_name, batch_key, test_flag = io.init_from_input( + sys.argv +) # loads standard experiment +hemis, batch = batch_key.split("_") -#track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False -#print(track_name, batch_key, test_flag) -hemis, batch = batch_key.split('_') - -load_path = mconfig['paths']['work'] +batch_key+'/B02_spectra/' -load_file = load_path + 'B02_' + track_name #+ '.nc' -plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/' +load_path = mconfig["paths"]["work"] + batch_key + "/B02_spectra/" +load_file = load_path + "B02_" + track_name # + '.nc' +plot_path = ( + mconfig["paths"]["plot"] + "/" + hemis + "/" + batch_key + "/" + track_name + "/" +) MT.mkdirs_r(plot_path) -Gk = xr.open_dataset(load_file+'_gFT_k.nc') -Gx = xr.open_dataset(load_file+'_gFT_x.nc') +Gk = xr.open_dataset(load_file + "_gFT_k.nc") +Gx = 
xr.open_dataset(load_file + "_gFT_x.nc") -Gfft = xr.open_dataset(load_file+'_FFT.nc') -# print(Gk) -# print(Gx) +Gfft = xr.open_dataset(load_file + "_FFT.nc") time.sleep(2) -# %% -# for ibeam in Gk.beam: -# print(Gk.sel(beam=ibeam).gFT_PSD_data.data) - -# %% -all_beams = mconfig['beams']['all_beams'] -high_beams = mconfig['beams']['high_beams'] -low_beams = mconfig['beams']['low_beams'] -#Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data -#Gd = io.load_pandas_table_dict(track_name + '_B01_binned' , load_path) # +all_beams = mconfig["beams"]["all_beams"] +high_beams = mconfig["beams"]["high_beams"] +low_beams = mconfig["beams"]["low_beams"] color_schemes.colormaps2(21) -# %% check paths (again) - -col_dict= color_schemes.rels -F = M.figure_axis_xy(9, 3, view_scale =0.5) +col_dict = color_schemes.rels +F = M.figure_axis_xy(9, 3, view_scale=0.5) -plt.subplot(1,3, 1) -plt.title(track_name , loc ='left') +plt.subplot(1, 3, 1) +plt.title(track_name, loc="left") for k in all_beams: I = Gk.sel(beam=k) I2 = Gx.sel(beam=k) - plt.plot(I['lon'], I['lat'], '.', c= col_dict[k], markersize = 0.7, linewidth =0.3) - plt.plot(I2['lon'], I2['lat'], '|', c= col_dict[k], markersize = 0.7 ) + plt.plot(I["lon"], I["lat"], ".", c=col_dict[k], markersize=0.7, linewidth=0.3) + plt.plot(I2["lon"], I2["lat"], "|", c=col_dict[k], markersize=0.7) -plt.xlabel('lon') -plt.ylabel('lat') +plt.xlabel("lon") +plt.ylabel("lat") -plt.subplot(1,3, 2) +plt.subplot(1, 3, 2) -xscale= 1e3 +xscale = 1e3 for k in all_beams: I = Gk.sel(beam=k) - plt.plot( I['x_coord']/xscale , I['y_coord']/xscale, '.' , c= col_dict[k] , linewidth = 0.8, markersize = 0.8 ) - # I2 = G_gFT[k] - # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) + plt.plot( + I["x_coord"] / xscale, + I["y_coord"] / xscale, + ".", + c=col_dict[k], + linewidth=0.8, + markersize=0.8, + ) -plt.xlabel('x_coord (km)') -plt.ylabel('y_coord (km)') +plt.xlabel("x_coord (km)") +plt.ylabel("y_coord (km)") -plt.subplot(1,3, 3) +plt.subplot(1, 3, 3) -xscale= 1e3 +xscale = 1e3 for k in all_beams: I = Gk.sel(beam=k) - plt.plot( I['x_coord']/xscale , (I['y_coord']-I['y_coord'][0]), '.' 
, c= col_dict[k], linewidth = 0.8, markersize = 0.8) - # I2 = G_gFT[k] - # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) + plt.plot( + I["x_coord"] / xscale, + (I["y_coord"] - I["y_coord"][0]), + ".", + c=col_dict[k], + linewidth=0.8, + markersize=0.8, + ) -plt.xlabel('x_coord (km)') -plt.ylabel('y_coord deviation (m)') +plt.xlabel("x_coord (km)") +plt.ylabel("y_coord deviation (m)") -F.save_light(path=plot_path, name = 'B03_specs_coord_check') +F.save_light(path=plot_path, name="B03_specs_coord_check") -# %% def dict_weighted_mean(Gdict, weight_key): """ returns the weighted meean of a dict of xarray, data_arrays weight_key must be in the xr.DataArrays """ - #Gdict = G_rar_fft - #weight_key='N_per_stancil' - akey = list( Gdict.keys() )[0] + akey = list(Gdict.keys())[0] GSUM = Gdict[akey].copy() - GSUM.data = np.zeros(GSUM.shape) + GSUM.data = np.zeros(GSUM.shape) N_per_stancil = GSUM.N_per_stancil * 0 - N_photons = np.zeros(GSUM.N_per_stancil.size) + N_photons = np.zeros(GSUM.N_per_stancil.size) - counter= 0 - for k,I in Gdict.items(): - #print(k) - I =I.squeeze() - print(len(I.x) ) - if len(I.x) !=0: - GSUM += I.where( ~np.isnan(I), 0) * I[weight_key] #.sel(x=GSUM.x) - N_per_stancil += I[weight_key] - if 'N_photons' in GSUM.coords: - N_photons += I['N_photons'] - counter+=1 + counter = 0 + for I in Gdict.items(): + I = I.squeeze() + if len(I.x) != 0: + GSUM += I.where(~np.isnan(I), 0) * I[weight_key] # .sel(x=GSUM.x) + N_per_stancil += I[weight_key] + if "N_photons" in GSUM.coords: + N_photons += I["N_photons"] + counter += 1 - GSUM = GSUM / N_per_stancil + GSUM = GSUM / N_per_stancil - if 'N_photons' in GSUM.coords: - GSUM.coords['N_photons'] = (('x', 'beam'), np.expand_dims(N_photons, 1) ) + if "N_photons" in GSUM.coords: + GSUM.coords["N_photons"] = (("x", "beam"), np.expand_dims(N_photons, 1)) - GSUM['beam'] = ['weighted_mean'] - GSUM.name='power_spec' + GSUM["beam"] = ["weighted_mean"] + GSUM.name = "power_spec" return GSUM -G_gFT_wmean = (Gk['gFT_PSD_data'].where( ~np.isnan(Gk['gFT_PSD_data']), 0) * Gk['N_per_stancil']).sum('beam')/ Gk['N_per_stancil'].sum('beam') -G_gFT_wmean['N_per_stancil'] = Gk['N_per_stancil'].sum('beam') - -G_fft_wmean = (Gfft.where( ~np.isnan(Gfft), 0) * Gfft['N_per_stancil']).sum('beam')/ Gfft['N_per_stancil'].sum('beam') -G_fft_wmean['N_per_stancil'] = Gfft['N_per_stancil'].sum('beam') +G_gFT_wmean = ( + Gk["gFT_PSD_data"].where(~np.isnan(Gk["gFT_PSD_data"]), 0) * Gk["N_per_stancil"] +).sum("beam") / Gk["N_per_stancil"].sum("beam") +G_gFT_wmean["N_per_stancil"] = Gk["N_per_stancil"].sum("beam") +G_fft_wmean = (Gfft.where(~np.isnan(Gfft), 0) * Gfft["N_per_stancil"]).sum( + "beam" +) / Gfft["N_per_stancil"].sum("beam") +G_fft_wmean["N_per_stancil"] = Gfft["N_per_stancil"].sum("beam") -# %% plot -def plot_wavenumber_spectrogram(ax, Gi, clev, title= None, plot_photon_density=True ): - if Gi.k[0] ==0: - Gi= Gi.sel(k=Gi.k[1:]) - x_lambda= 2 * np.pi/Gi.k - plt.pcolormesh(Gi.x/1e3, x_lambda , Gi, cmap=plt.cm.ocean_r , vmin = clev[0], vmax = clev[-1]) +def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=True): + if Gi.k[0] == 0: + Gi = Gi.sel(k=Gi.k[1:]) + x_lambda = 2 * np.pi / Gi.k + plt.pcolormesh( + Gi.x / 1e3, x_lambda, Gi, cmap=plt.cm.ocean_r, vmin=clev[0], vmax=clev[-1] + ) - ax.set_yscale('log') - # plt.colorbar(orientation='vertical', pad=0.06, label='Spectral Power (m^2/m)') + ax.set_yscale("log") if plot_photon_density: + plt.plot( + Gi.x / 1e3, + x_lambda[-1] + (Gi.N_per_stancil / 
Gi.N_per_stancil.max()) * 10, + c="black", + linewidth=0.8, + label="NAN-density", + ) + plt.fill_between( + Gi.x / 1e3, + x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10, + 0, + color="gray", + alpha=0.3, + ) + ax.axhline(30, color="black", linewidth=0.3) - plt.plot(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10 , c='black', linewidth= 0.8, label='NAN-density' ) - plt.fill_between(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10, 0, color='gray', alpha = 0.3) - ax.axhline(30, color='black', linewidth=0.3) - - #plt.xlabel('Distance from the Ice Edge (km)') plt.ylim(x_lambda[-1], x_lambda[0]) - plt.title(title, loc='left') + plt.title(title, loc="left") + -#Gplot = G.rolling(x=5, min_periods= 1, center=True).mean() -#Gmean = G_gFT_wmean.rolling(x=2, min_periods= 1, center=True).mean() Gmean = G_gFT_wmean.rolling(k=5, center=True).mean() -#Gmean = Gmean.where(~np.isnan(Gmean), 0) + try: - k_max_range = Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1.25 + k_max_range = ( + Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 0.75, + Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1, + Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1.25, + ) except: - k_max_range = Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1.25 - + k_max_range = ( + Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 0.75, + Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1, + Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1.25, + ) -# %% font_for_print() -F = M.figure_axis_xy(6.5, 5.6, container= True, view_scale =1) +F = M.figure_axis_xy(6.5, 5.6, container=True, view_scale=1) Lmeters = Gk.L.data[0] -plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98) -gs = GridSpec(3,3, wspace=0.2, hspace=.5)#figure=fig, -#clev=np.arange(0, 6, 0.1)*3 - -#%matplotlib inline +plt.suptitle("gFT Slope Spectrograms\n" + track_name, y=0.98) +gs = GridSpec(3, 3, wspace=0.2, hspace=0.5) -# define mean first for colorbar -Gplot = G_gFT_wmean.squeeze().rolling(k=10, min_periods= 1, center=True).median().rolling(x=3, min_periods= 1, center=True).median() +Gplot = ( + G_gFT_wmean.squeeze() + .rolling(k=10, min_periods=1, center=True) + .median() + .rolling(x=3, min_periods=1, center=True) + .median() +) dd = 10 * np.log10(Gplot) -dd= dd.where(~np.isinf(dd), np.nan ) -clev_log = M.clevels( [dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31)* 1 - -#clev = M.clevels( [Gmean.quantile(0.6).data * 1e4, Gmean.quantile(0.99).data * 1e4], 31)/ 1e4 +dd = dd.where(~np.isinf(dd), np.nan) +clev_log = M.clevels([dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31) * 1 -xlims= Gmean.x[0]/1e3, Gmean.x[-1]/1e3 +xlims = Gmean.x[0] / 1e3, Gmean.x[-1] / 1e3 -k =high_beams[0] -for pos, k, pflag in zip([gs[0, 0],gs[0, 1],gs[0, 2] ], high_beams, [True, False, False] ): +k = high_beams[0] +for pos, k, pflag in zip( + [gs[0, 0], gs[0, 1], gs[0, 2]], high_beams, [True, False, False] +): ax0 = F.fig.add_subplot(pos) - Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean() - #Gplot.mean('x').plot() + Gplot = 
Gk.sel(beam=k).gFT_PSD_data.squeeze() dd2 = 10 * np.log10(Gplot) - dd2= dd2.where(~np.isinf(dd2), np.nan ) - plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k + ' unsmoothed', plot_photon_density=True ) + dd2 = dd2.where(~np.isinf(dd2), np.nan) + plot_wavenumber_spectrogram( + ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True + ) plt.xlim(xlims) - # if pflag: - plt.ylabel('Wave length\n(meters)') + plt.ylabel("Wave length\n(meters)") plt.legend() -for pos, k, pflag in zip([gs[1, 0],gs[1, 1],gs[1, 2] ], low_beams, [True, False, False] ): +for pos, k, pflag in zip( + [gs[1, 0], gs[1, 1], gs[1, 2]], low_beams, [True, False, False] +): ax0 = F.fig.add_subplot(pos) - Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean() - #Gplot.mean('x').plot() + Gplot = Gk.sel(beam=k).gFT_PSD_data.squeeze() dd2 = 10 * np.log10(Gplot) - dd2= dd2.where(~np.isinf(dd2), np.nan ) - plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k+ ' unsmoothed', plot_photon_density=True ) + dd2 = dd2.where(~np.isinf(dd2), np.nan) + plot_wavenumber_spectrogram( + ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True + ) plt.xlim(xlims) - # if pflag: - plt.ylabel('Wave length\n(meters)') + plt.ylabel("Wave length\n(meters)") plt.legend() ax0 = F.fig.add_subplot(gs[2, 0]) -plot_wavenumber_spectrogram(ax0, dd, clev_log , title ='smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$', plot_photon_density= True) +plot_wavenumber_spectrogram( + ax0, + dd, + clev_log, + title="smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$", + plot_photon_density=True, +) plt.xlim(xlims) -# plt.plot(Gplot.x/1e3, 10* nan_list +20 , c='black', label='NAN-density' ) -# ax0.axhline(30, color='black', linewidth=0.5) - -ax0.axhline(2* np.pi/k_max_range[0], color='red', linestyle= '--', linewidth= 0.5) -ax0.axhline(2* np.pi/k_max_range[1], color='red', linestyle= '-', linewidth= 0.5) -ax0.axhline(2* np.pi/k_max_range[2], color='red', linestyle= '--', linewidth= 0.5) +ax0.axhline(2 * np.pi / k_max_range[0], color="red", linestyle="--", linewidth=0.5) +ax0.axhline(2 * np.pi / k_max_range[1], color="red", linestyle="-", linewidth=0.5) +ax0.axhline(2 * np.pi / k_max_range[2], color="red", linestyle="--", linewidth=0.5) if pflag: - plt.ylabel('Wave length\n(meters)') + plt.ylabel("Wave length\n(meters)") plt.legend() pos = gs[2, 1] ax0 = F.fig.add_subplot(pos) -plt.title('Photons density ($m^{-1}$)', loc='left') +plt.title("Photons density ($m^{-1}$)", loc="left") for k in all_beams: - I = Gk.sel(beam = k)['gFT_PSD_data'] - plt.plot(Gplot.x/1e3, I.N_photons/I.L.data, label=k, linewidth=0.8) -plt.plot(Gplot.x/1e3, G_gFT_wmean.N_per_stancil/3/I.L.data , c='black', label='ave Photons' , linewidth=0.8) + I = Gk.sel(beam=k)["gFT_PSD_data"] + plt.plot(Gplot.x / 1e3, I.N_photons / I.L.data, label=k, linewidth=0.8) +plt.plot( + Gplot.x / 1e3, + G_gFT_wmean.N_per_stancil / 3 / I.L.data, + c="black", + label="ave Photons", + linewidth=0.8, +) plt.xlim(xlims) -plt.xlabel('Distance from the Ice Edge (km)') +plt.xlabel("Distance from the Ice Edge (km)") pos = gs[2, 2] ax0 = F.fig.add_subplot(pos) -ax0.set_yscale('log') +ax0.set_yscale("log") -plt.title('Peak Spectal Power', loc='left') +plt.title("Peak Spectal Power", loc="left") x0 = Gk.x[0].data for k in all_beams: - I = Gk.sel(beam = k)['gFT_PSD_data'] - plt.scatter(I.x.data/1e3, I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k').data , s=0.5, marker='.', color='red', alpha= 0.3) - - I= Gfft.sel(beam = 
k)#.to_array() - #I= I[:,I.N_per_stancil >= I.N_per_stancil.max().data*0.9] - plt.scatter( (x0 +I.x.data)/1e3, I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , s=0.5, marker='.', c='blue', alpha= 0.3) + I = Gk.sel(beam=k)["gFT_PSD_data"] + plt.scatter( + I.x.data / 1e3, + I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k").data, + s=0.5, + marker=".", + color="red", + alpha=0.3, + ) + I = Gfft.sel(beam=k) + plt.scatter( + (x0 + I.x.data) / 1e3, + I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), + s=0.5, + marker=".", + c="blue", + alpha=0.3, + ) + + +Gplot = G_fft_wmean.squeeze() +Gplot = Gplot.power_spec[:, Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data * 0.9] +plt.plot( + (x0 + Gplot.x) / 1e3, + Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), + ".", + markersize=1.5, + c="blue", + label="FFT", +) +Gplot = G_gFT_wmean.squeeze() +plt.plot( + Gplot.x / 1e3, + Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), + ".", + markersize=1.5, + c="red", + label="gFT", +) -Gplot= G_fft_wmean.squeeze() -Gplot = Gplot.power_spec[:,Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data*0.9] -plt.plot( (x0 + Gplot.x)/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.', markersize=1.5 , c='blue', label= 'FFT') +plt.ylabel("1e-3 $(m)^2~m$") +plt.legend() -Gplot= G_gFT_wmean.squeeze() -plt.plot( Gplot.x/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.' , markersize=1.5, c='red', label= 'gFT') +F.save_light(path=plot_path, name="B03_specs_L" + str(Lmeters)) -plt.ylabel('1e-3 $(m)^2~m$') -plt.legend() -#plt.ylim(Gplot.min()*1.4, Gplot.max()*1.4 ) -#plt.xlim(xlims) +Gk.sel(beam=k).gFT_PSD_data.plot() -F.save_light(path=plot_path, name = 'B03_specs_L'+str(Lmeters)) -# %% -Gk.sel(beam = k).gFT_PSD_data.plot() +def plot_model_eta(D, ax, offset=0, xscale=1e3, **kargs): + eta = D.eta + D.x + y_data = D.y_model + offset + plt.plot(eta / xscale, y_data, **kargs) -# %% define simple routines -def plot_model_eta(D, ax, offset = 0, xscale= 1e3 , **kargs ): - eta = D.eta + D.x - y_data = D.y_model+offset - plt.plot(eta/xscale,y_data , **kargs) + ax.axvline(eta[0].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5) + ax.axvline(eta[-1].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5) - ax.axvline(eta[0].data/xscale , linewidth=2, color=kargs['color'], alpha=0.5) - ax.axvline(eta[-1].data/xscale, linewidth=2, color=kargs['color'], alpha=0.5) def add_info(D, Dk, ylims): - eta = D.eta + D.x - N_per_stancil, ksize = Dk.N_per_stancil.data , Dk.k.size - plt.text(eta[0].data, ylims[-1], ' N='+numtostr(N_per_stancil) + ' N/2M= '+ fltostr(N_per_stancil/2/ksize, 1) ) - -def plot_data_eta(D, offset = 0,xscale= 1e3 , **kargs ): - eta_1 = D.eta + D.x - y_data = D.y_model +offset - plt.plot(eta_1/xscale,y_data , **kargs) + eta = D.eta + D.x + N_per_stancil, ksize = Dk.N_per_stancil.data, Dk.k.size + plt.text( + eta[0].data, + ylims[-1], + " N=" + + numtostr(N_per_stancil) + + " N/2M= " + + fltostr(N_per_stancil / 2 / ksize, 1), + ) + + +def plot_data_eta(D, offset=0, xscale=1e3, **kargs): + eta_1 = D.eta + D.x + y_data = D.y_model + offset + plt.plot(eta_1 / xscale, y_data, **kargs) return eta_1 -# %% phase examples -### overlapping views -#for i in np.arange(0,29,2): -# i = 4 -# c1= 'blue' -# c2= 'red' -# -# Gx_1 = Gx.isel(x= i).sel(beam = k) -# Gx_2 = Gx.isel(x= i+1).sel(beam = k) -# -# Gk_1 = Gk.isel(x= i).sel(beam = k) -# Gk_2 = Gk.isel(x= i+1).sel(beam = k) 
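# A minimal sketch of the band integration used in the "Peak Spectal
# Power" panel above: select the wavenumber band around the spectral
# peak and integrate the PSD over k. Assumes an xarray DataArray `psd`
# with dims (x, k) shaped like gFT_PSD_data; names are illustrative.
import numpy as np
import xarray as xr

k = np.linspace(0.005, 0.2, 100)
x = np.arange(0, 50e3, 5e3)
psd = xr.DataArray(
    np.exp(-((k - 0.05) / 0.02) ** 2) * np.ones((x.size, 1)),
    coords={"x": x, "k": k},
    dims=("x", "k"),
)

k_peak = psd.mean("x").idxmax("k").item()         # wavenumber of the mean peak
band = slice(0.75 * k_peak, 1.25 * k_peak)        # same +/-25% band as k_max_range
peak_power = psd.sel(k=band).integrate("k")       # one value per x position
print(peak_power.values)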
-# -# fltostr = MT.float_to_str -# numtostr = MT.num_to_str -# -# #if k%2 ==0: -# font_for_print() -# F = M.figure_axis_xy(9, 5, container =True, view_scale= 0.8) -# -# plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98) -# gs = GridSpec(3,4, wspace=0.2, hspace=.5)#figure=fig, -# -# ax0 = F.fig.add_subplot(gs[0, :]) -# -# -# -# plot_model_eta(Gx_1, ax0, linestyle='-', color=c1, linewidth=0.4, alpha=1, zorder=12 ) -# plot_model_eta(Gx_2, ax0, linestyle='-', color=c2, linewidth=0.4, alpha=1, zorder=12 ) -# -# ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 -# -# add_info(Gx_1, Gk_1 , ylims ) -# add_info(Gx_2, Gk_1 , ylims ) -# -# # oringial data -# -# eta_1= plot_data_eta(Gx_1 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) -# eta_2= plot_data_eta(Gx_2 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) -# -# dx = eta_1.diff('eta').mean() -# plt.xlim(eta_1[0].data - 40 * dx, eta_2[-1].data + 40 * dx ) -# plt.ylim(ylims[0], ylims[-1]) -# - -# %% Single views - -def plot_data_eta(D, offset = 0 , **kargs ): - eta_1 = D.eta# + D.x - y_data = D.y_model +offset - plt.plot(eta_1,y_data , **kargs) +def plot_data_eta(D, offset=0, **kargs): + eta_1 = D.eta # + D.x + y_data = D.y_model + offset + plt.plot(eta_1, y_data, **kargs) return eta_1 -def plot_model_eta(D, ax, offset = 0, **kargs ): - eta = D.eta #+ D.x - y_data = D.y_model+offset - plt.plot(eta ,y_data , **kargs) - ax.axvline(eta[0].data, linewidth=0.1, color=kargs['color'], alpha=0.5) - ax.axvline(eta[-1].data, linewidth=0.1, color=kargs['color'], alpha=0.5) +def plot_model_eta(D, ax, offset=0, **kargs): + eta = D.eta # + D.x + y_data = D.y_model + offset + plt.plot(eta, y_data, **kargs) + + ax.axvline(eta[0].data, linewidth=0.1, color=kargs["color"], alpha=0.5) + ax.axvline(eta[-1].data, linewidth=0.1, color=kargs["color"], alpha=0.5) + -if ('y_data' in Gx.sel(beam = 'gt3r').keys()): - print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) ) +if "y_data" in Gx.sel(beam="gt3r").keys(): + print("ydata is ", ("y_data" in Gx.sel(beam="gt3r").keys())) else: - print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) ) - MT.json_save('B03_fail', plot_path, {'reason':'no y_data'}) - print('failed, exit') + print("ydata is ", ("y_data" in Gx.sel(beam="gt3r").keys())) + MT.json_save("B03_fail", plot_path, {"reason": "no y_data"}) + print("failed, exit") exit() - -# %% fltostr = MT.float_to_str numtostr = MT.num_to_str font_for_print() +MT.mkdirs_r(plot_path + "B03_spectra/") -#for i in x_pos_sel[::2]: -#i =x_pos_sel[20] -MT.mkdirs_r(plot_path+'B03_spectra/') - -x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data.data)] -x_pos_max = Gk.mean('beam').mean('k').gFT_PSD_data[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data)].argmax().data -xpp = x_pos_sel[ [int(i) for i in np.round(np.linspace(0, x_pos_sel.size-1, 4))]] +x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data.data)] +x_pos_max = ( + Gk.mean("beam") + .mean("k") + .gFT_PSD_data[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data)] + .argmax() + .data +) +xpp = x_pos_sel[[int(i) for i in np.round(np.linspace(0, x_pos_sel.size - 1, 4))]] xpp = np.insert(xpp, 0, x_pos_max) for i in xpp: + F = M.figure_axis_xy(6, 8, container=True, view_scale=0.8) - #i = xpp[0] - F = M.figure_axis_xy(6, 8, container =True, view_scale= 0.8) - - plt.suptitle('gFT Model and Spectrograms | x='+str(Gk.x[i].data)+' \n' + track_name, y = 0.95) - gs = GridSpec(5,6, wspace=0.2, 
hspace=0.7)#figure=fig, + plt.suptitle( + "gFT Model and Spectrograms | x=" + str(Gk.x[i].data) + " \n" + track_name, + y=0.95, + ) + gs = GridSpec(5, 6, wspace=0.2, hspace=0.7) ax0 = F.fig.add_subplot(gs[0:2, :]) - col_d = color_schemes.__dict__['rels'] + col_d = color_schemes.__dict__["rels"] neven = True offs = 0 for k in all_beams: - - Gx_1 = Gx.isel(x= i).sel(beam = k) - Gk_1 = Gk.isel(x= i).sel(beam = k) - - plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 ) - ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 - #add_info(Gx_1, Gk_1 , ylims ) + Gx_1 = Gx.isel(x=i).sel(beam=k) + Gk_1 = Gk.isel(x=i).sel(beam=k) + + plot_model_eta( + Gx_1, + ax0, + offset=offs, + linestyle="-", + color=col_d[k], + linewidth=0.4, + alpha=1, + zorder=12, + ) + ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3 # oringial data - eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) + eta_1 = plot_data_eta( + Gx_1, offset=offs, linestyle="-", c="k", linewidth=1, alpha=0.5, zorder=11 + ) # reconstruct in gaps - FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k ) + FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k) _ = FT.get_H() - FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ]) - plt.plot(Gx_1.eta, FT.model()+offs ,'-', c='orange', linewidth=0.3, alpha=1,zorder= 2) + FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff]) + plt.plot( + Gx_1.eta, + FT.model() + offs, + "-", + c="orange", + linewidth=0.3, + alpha=1, + zorder=2, + ) if neven: neven = False - offs += .3 + offs += 0.3 else: neven = True - offs +=0.6 - + offs += 0.6 - dx = eta_1.diff('eta').mean().data + dx = eta_1.diff("eta").mean().data - eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) + eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) ax0.set_xticks(eta_ticks) - ax0.set_xticklabels(eta_ticks/1e3) - plt.xlim( eta_1[0].data - 40 * dx, eta_1[-1].data+ 40 * dx ) - plt.title('Model reconst.', loc ='left') - - - plt.ylabel('relative slope (m/m)') - plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km') + ax0.set_xticklabels(eta_ticks / 1e3) + plt.xlim(eta_1[0].data - 40 * dx, eta_1[-1].data + 40 * dx) + plt.title("Model reconst.", loc="left") + plt.ylabel("relative slope (m/m)") + plt.xlabel( + "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km" + ) # spectra # define threshold k_thresh = 0.085 ax1_list = list() - dd_max=list() - for pos, kgroup, lflag in zip([ gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], [['gt1l', 'gt1r'], ['gt2l', 'gt2r'], ['gt3l', 'gt3r']], [True, False, False] ): - + dd_max = list() + for pos, kgroup, lflag in zip( + [gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], + [["gt1l", "gt1r"], ["gt2l", "gt2r"], ["gt3l", "gt3r"]], + [True, False, False], + ): ax11 = F.fig.add_subplot(pos) ax11.tick_params(labelleft=lflag) ax1_list.append(ax11) for k in kgroup: + Gx_1 = Gx.isel(x=i).sel(beam=k) + Gk_1 = Gk.isel(x=i).sel(beam=k) - Gx_1 = Gx.isel(x= i).sel(beam = k) - Gk_1 = Gk.isel(x= i).sel(beam = k) + klim = Gk_1.k[0], Gk_1.k[-1] - klim= Gk_1.k[0], Gk_1.k[-1] + if "l" in k: + dd = Gk_1.gFT_PSD_data + plt.plot(Gk_1.k, dd, color="gray", linewidth=0.5, alpha=0.5) - if 'l' in k: - dd = Gk_1.gFT_PSD_data#.rolling(k=10, min_periods= 1, center=True).mean() - plt.plot(Gk_1.k, dd, color='gray', linewidth=.5 ,alpha= 0.5 ) - - dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods= 1, center=True).mean() - 
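# What the reconstruction loop above computes, in plain numpy: rebuild
# the surface on the segment coordinate from stored cos/sin coefficients
# as (p_hat * H).sum(1), which is what gFT's FT.model() returns. A sketch
# with synthetic coefficients; in the script they come from
# Gk_1.gFT_cos_coeff and Gk_1.gFT_sin_coeff.
import numpy as np

eta = np.linspace(-1e4, 1e4, 2001)                # segment coordinate (m)
k = 2 * np.pi / np.linspace(50, 500, 30)          # wavenumber grid (rad/m)
cos_coeff = 0.01 * np.exp(-((k - 0.05) / 0.02) ** 2)
sin_coeff = np.zeros_like(k)

H = np.hstack([np.cos(np.outer(eta, k)), np.sin(np.outer(eta, k))])
p_hat = np.concatenate([cos_coeff, sin_coeff])
reconstruction = (p_hat * H).sum(1)               # same as FT.model()
print(reconstruction.shape)                       # (2001,)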
plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=.8 ) + dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods=1, center=True).mean() + plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=0.8) dd_max.append(np.nanmax(dd.data)) plt.xlim(klim) if lflag: - plt.ylabel('$(m/m)^2/k$') - plt.title('Energy Spectra', loc ='left') - - plt.xlabel('wavenumber k (2$\pi$ m$^{-1}$)') + plt.ylabel("$(m/m)^2/k$") + plt.title("Energy Spectra", loc="left") - #plt.ylim(dd.min(), max(dd_max) * 1.1) + plt.xlabel("wavenumber k (2$\pi$ m$^{-1}$)") - ax11.axvline(k_thresh, linewidth=1, color='gray', alpha=1) - ax11.axvspan(k_thresh , klim[-1], color='gray', alpha=0.5, zorder=12) + ax11.axvline(k_thresh, linewidth=1, color="gray", alpha=1) + ax11.axvspan(k_thresh, klim[-1], color="gray", alpha=0.5, zorder=12) if ~np.isnan(np.nanmax(dd_max)): for ax in ax1_list: @@ -517,48 +524,58 @@ def plot_model_eta(D, ax, offset = 0, **kargs ): neven = True offs = 0 for k in all_beams: + Gx_1 = Gx.isel(x=i).sel(beam=k) + Gk_1 = Gk.isel(x=i).sel(beam=k) - Gx_1 = Gx.isel(x= i).sel(beam = k) - Gk_1 = Gk.isel(x= i).sel(beam = k) - - #plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 ) - ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 - #add_info(Gx_1, Gk_1 , ylims ) + ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3 # oringial data - eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1.5, alpha =0.5, zorder=11) + eta_1 = plot_data_eta( + Gx_1, offset=offs, linestyle="-", c="k", linewidth=1.5, alpha=0.5, zorder=11 + ) # reconstruct in gaps - FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k ) + FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k) _ = FT.get_H() - FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ]) + FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff]) - p_hat_k = np.concatenate([ Gk_1.k, Gk_1.k ]) + p_hat_k = np.concatenate([Gk_1.k, Gk_1.k]) k_mask = p_hat_k < k_thresh FT.p_hat[~k_mask] = 0 - plt.plot(Gx_1.eta, FT.model()+offs ,'-', c=col_d[k], linewidth=0.8, alpha=1,zorder= 12) + plt.plot( + Gx_1.eta, + FT.model() + offs, + "-", + c=col_d[k], + linewidth=0.8, + alpha=1, + zorder=12, + ) if neven: neven = False - offs += .3 + offs += 0.3 else: neven = True - offs +=0.6 + offs += 0.6 - dx = eta_1.diff('eta').mean().data + dx = eta_1.diff("eta").mean().data - eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) + eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) ax0.set_xticks(eta_ticks) - ax0.set_xticklabels(eta_ticks/1e3) - plt.xlim( eta_1[1000].data - 40 * dx, eta_1[-1000].data+ 40 * dx ) - plt.title('Low-Wavenumber Model reconst.', loc ='left') + ax0.set_xticklabels(eta_ticks / 1e3) + plt.xlim(eta_1[1000].data - 40 * dx, eta_1[-1000].data + 40 * dx) + plt.title("Low-Wavenumber Model reconst.", loc="left") + plt.ylabel("relative slope (m/m)") + plt.xlabel( + "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km" + ) - plt.ylabel('relative slope (m/m)') - plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km') + F.save_pup(path=plot_path + "B03_spectra/", name="B03_freq_reconst_x" + str(i)) - F.save_pup(path=plot_path+'B03_spectra/', name = 'B03_freq_reconst_x'+str(i)) - -MT.json_save('B03_success', plot_path, {'time':'time.asctime( time.localtime(time.time()) )'}) +MT.json_save( + "B03_success", plot_path, {"time": "time.asctime( time.localtime(time.time()) )"} +) From 
6b34132806bfc713a321cbbf1107a4cef705250e Mon Sep 17 00:00:00 2001 From: Camilo Diaz Date: Thu, 11 Jan 2024 10:57:21 -0500 Subject: [PATCH 03/12] remove analysis_db/B03_plot_spectra_ov.py --- .../analysis_db/B03_plot_spectra_ov.py | 581 ------------------ 1 file changed, 581 deletions(-) delete mode 100644 src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py diff --git a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py deleted file mode 100644 index 94deba31..00000000 --- a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py +++ /dev/null @@ -1,581 +0,0 @@ -import os, sys - -""" -This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. -This is python 3 -""" - -from icesat2_tracks.config.IceSAT2_startup import ( - mconfig, - xr, - color_schemes, - plt, - np, - font_for_print, -) - -import icesat2_tracks.ICEsat2_SI_tools.io as io -import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec - -import time -from matplotlib.gridspec import GridSpec -import icesat2_tracks.ICEsat2_SI_tools.generalized_FT as gFT -from scipy.ndimage.measurements import label -import icesat2_tracks.local_modules.m_tools_ph3 as MT -from icesat2_tracks.local_modules import m_general_ph3 as M - - -track_name, batch_key, test_flag = io.init_from_input( - sys.argv -) # loads standard experiment -hemis, batch = batch_key.split("_") - -load_path = mconfig["paths"]["work"] + batch_key + "/B02_spectra/" -load_file = load_path + "B02_" + track_name # + '.nc' -plot_path = ( - mconfig["paths"]["plot"] + "/" + hemis + "/" + batch_key + "/" + track_name + "/" -) -MT.mkdirs_r(plot_path) - -Gk = xr.open_dataset(load_file + "_gFT_k.nc") -Gx = xr.open_dataset(load_file + "_gFT_x.nc") - -Gfft = xr.open_dataset(load_file + "_FFT.nc") -time.sleep(2) - - -all_beams = mconfig["beams"]["all_beams"] -high_beams = mconfig["beams"]["high_beams"] -low_beams = mconfig["beams"]["low_beams"] -color_schemes.colormaps2(21) - -col_dict = color_schemes.rels -F = M.figure_axis_xy(9, 3, view_scale=0.5) - -plt.subplot(1, 3, 1) -plt.title(track_name, loc="left") -for k in all_beams: - I = Gk.sel(beam=k) - I2 = Gx.sel(beam=k) - plt.plot(I["lon"], I["lat"], ".", c=col_dict[k], markersize=0.7, linewidth=0.3) - plt.plot(I2["lon"], I2["lat"], "|", c=col_dict[k], markersize=0.7) - - -plt.xlabel("lon") -plt.ylabel("lat") - -plt.subplot(1, 3, 2) - -xscale = 1e3 -for k in all_beams: - I = Gk.sel(beam=k) - plt.plot( - I["x_coord"] / xscale, - I["y_coord"] / xscale, - ".", - c=col_dict[k], - linewidth=0.8, - markersize=0.8, - ) - -plt.xlabel("x_coord (km)") -plt.ylabel("y_coord (km)") - -plt.subplot(1, 3, 3) - -xscale = 1e3 -for k in all_beams: - I = Gk.sel(beam=k) - plt.plot( - I["x_coord"] / xscale, - (I["y_coord"] - I["y_coord"][0]), - ".", - c=col_dict[k], - linewidth=0.8, - markersize=0.8, - ) - -plt.xlabel("x_coord (km)") -plt.ylabel("y_coord deviation (m)") - - -F.save_light(path=plot_path, name="B03_specs_coord_check") - - -def dict_weighted_mean(Gdict, weight_key): - """ - returns the weighted meean of a dict of xarray, data_arrays - weight_key must be in the xr.DataArrays - """ - - akey = list(Gdict.keys())[0] - GSUM = Gdict[akey].copy() - GSUM.data = np.zeros(GSUM.shape) - N_per_stancil = GSUM.N_per_stancil * 0 - N_photons = np.zeros(GSUM.N_per_stancil.size) - - counter = 0 - for I in Gdict.items(): - I = I.squeeze() - if len(I.x) != 0: - GSUM += I.where(~np.isnan(I), 0) * I[weight_key] # .sel(x=GSUM.x) - 
N_per_stancil += I[weight_key] - if "N_photons" in GSUM.coords: - N_photons += I["N_photons"] - counter += 1 - - GSUM = GSUM / N_per_stancil - - if "N_photons" in GSUM.coords: - GSUM.coords["N_photons"] = (("x", "beam"), np.expand_dims(N_photons, 1)) - - GSUM["beam"] = ["weighted_mean"] - GSUM.name = "power_spec" - - return GSUM - - -G_gFT_wmean = ( - Gk["gFT_PSD_data"].where(~np.isnan(Gk["gFT_PSD_data"]), 0) * Gk["N_per_stancil"] -).sum("beam") / Gk["N_per_stancil"].sum("beam") -G_gFT_wmean["N_per_stancil"] = Gk["N_per_stancil"].sum("beam") - -G_fft_wmean = (Gfft.where(~np.isnan(Gfft), 0) * Gfft["N_per_stancil"]).sum( - "beam" -) / Gfft["N_per_stancil"].sum("beam") -G_fft_wmean["N_per_stancil"] = Gfft["N_per_stancil"].sum("beam") - - -def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=True): - if Gi.k[0] == 0: - Gi = Gi.sel(k=Gi.k[1:]) - x_lambda = 2 * np.pi / Gi.k - plt.pcolormesh( - Gi.x / 1e3, x_lambda, Gi, cmap=plt.cm.ocean_r, vmin=clev[0], vmax=clev[-1] - ) - - ax.set_yscale("log") - - if plot_photon_density: - plt.plot( - Gi.x / 1e3, - x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10, - c="black", - linewidth=0.8, - label="NAN-density", - ) - plt.fill_between( - Gi.x / 1e3, - x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10, - 0, - color="gray", - alpha=0.3, - ) - ax.axhline(30, color="black", linewidth=0.3) - - plt.ylim(x_lambda[-1], x_lambda[0]) - plt.title(title, loc="left") - - -Gmean = G_gFT_wmean.rolling(k=5, center=True).mean() - -try: - k_max_range = ( - Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 0.75, - Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1, - Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1.25, - ) -except: - k_max_range = ( - Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 0.75, - Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1, - Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1.25, - ) - -font_for_print() -F = M.figure_axis_xy(6.5, 5.6, container=True, view_scale=1) -Lmeters = Gk.L.data[0] - -plt.suptitle("gFT Slope Spectrograms\n" + track_name, y=0.98) -gs = GridSpec(3, 3, wspace=0.2, hspace=0.5) - -Gplot = ( - G_gFT_wmean.squeeze() - .rolling(k=10, min_periods=1, center=True) - .median() - .rolling(x=3, min_periods=1, center=True) - .median() -) -dd = 10 * np.log10(Gplot) -dd = dd.where(~np.isinf(dd), np.nan) -clev_log = M.clevels([dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31) * 1 - -xlims = Gmean.x[0] / 1e3, Gmean.x[-1] / 1e3 - -k = high_beams[0] -for pos, k, pflag in zip( - [gs[0, 0], gs[0, 1], gs[0, 2]], high_beams, [True, False, False] -): - ax0 = F.fig.add_subplot(pos) - Gplot = Gk.sel(beam=k).gFT_PSD_data.squeeze() - dd2 = 10 * np.log10(Gplot) - dd2 = dd2.where(~np.isinf(dd2), np.nan) - plot_wavenumber_spectrogram( - ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True - ) - plt.xlim(xlims) - if pflag: - plt.ylabel("Wave length\n(meters)") - plt.legend() - -for pos, k, pflag in zip( - [gs[1, 0], gs[1, 1], gs[1, 2]], low_beams, [True, False, False] -): - ax0 = F.fig.add_subplot(pos) - Gplot = Gk.sel(beam=k).gFT_PSD_data.squeeze() - dd2 = 10 * np.log10(Gplot) - dd2 = dd2.where(~np.isinf(dd2), np.nan) - plot_wavenumber_spectrogram( - ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True - ) - plt.xlim(xlims) - if pflag: - plt.ylabel("Wave length\n(meters)") - plt.legend() - -ax0 = F.fig.add_subplot(gs[2, 0]) - 
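# A sketch of the beam-weighted mean computed for G_gFT_wmean above:
# weight each beam's PSD by its per-stancil photon count, sum over
# beams, then normalize by the total count. Assumes a DataArray `psd`
# with dims (beam, x, k) and a matching count `n` (beam, x); names are
# illustrative. (If looping over a dict of DataArrays instead, iterate
# Gdict.values(); a bare `for I in Gdict.items()` yields (key, value)
# tuples, not arrays.)
import numpy as np
import xarray as xr

beams = ["gt1l", "gt1r"]
x = np.arange(5) * 1e3
k = np.linspace(0.01, 0.2, 20)
rng = np.random.default_rng(3)
psd = xr.DataArray(rng.random((2, 5, 20)), dims=("beam", "x", "k"),
                   coords={"beam": beams, "x": x, "k": k})
n = xr.DataArray(rng.integers(1, 100, (2, 5)), dims=("beam", "x"),
                 coords={"beam": beams, "x": x})

wmean = (psd.where(~np.isnan(psd), 0) * n).sum("beam") / n.sum("beam")
print(wmean.dims)   # ('x', 'k')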
-plot_wavenumber_spectrogram( - ax0, - dd, - clev_log, - title="smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$", - plot_photon_density=True, -) -plt.xlim(xlims) - -ax0.axhline(2 * np.pi / k_max_range[0], color="red", linestyle="--", linewidth=0.5) -ax0.axhline(2 * np.pi / k_max_range[1], color="red", linestyle="-", linewidth=0.5) -ax0.axhline(2 * np.pi / k_max_range[2], color="red", linestyle="--", linewidth=0.5) - -if pflag: - plt.ylabel("Wave length\n(meters)") - plt.legend() - -pos = gs[2, 1] -ax0 = F.fig.add_subplot(pos) -plt.title("Photons density ($m^{-1}$)", loc="left") - -for k in all_beams: - I = Gk.sel(beam=k)["gFT_PSD_data"] - plt.plot(Gplot.x / 1e3, I.N_photons / I.L.data, label=k, linewidth=0.8) -plt.plot( - Gplot.x / 1e3, - G_gFT_wmean.N_per_stancil / 3 / I.L.data, - c="black", - label="ave Photons", - linewidth=0.8, -) -plt.xlim(xlims) -plt.xlabel("Distance from the Ice Edge (km)") - -pos = gs[2, 2] - -ax0 = F.fig.add_subplot(pos) -ax0.set_yscale("log") - -plt.title("Peak Spectal Power", loc="left") - -x0 = Gk.x[0].data -for k in all_beams: - I = Gk.sel(beam=k)["gFT_PSD_data"] - plt.scatter( - I.x.data / 1e3, - I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k").data, - s=0.5, - marker=".", - color="red", - alpha=0.3, - ) - I = Gfft.sel(beam=k) - plt.scatter( - (x0 + I.x.data) / 1e3, - I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), - s=0.5, - marker=".", - c="blue", - alpha=0.3, - ) - - -Gplot = G_fft_wmean.squeeze() -Gplot = Gplot.power_spec[:, Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data * 0.9] -plt.plot( - (x0 + Gplot.x) / 1e3, - Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), - ".", - markersize=1.5, - c="blue", - label="FFT", -) - -Gplot = G_gFT_wmean.squeeze() -plt.plot( - Gplot.x / 1e3, - Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), - ".", - markersize=1.5, - c="red", - label="gFT", -) - -plt.ylabel("1e-3 $(m)^2~m$") -plt.legend() - -F.save_light(path=plot_path, name="B03_specs_L" + str(Lmeters)) - -Gk.sel(beam=k).gFT_PSD_data.plot() - - -def plot_model_eta(D, ax, offset=0, xscale=1e3, **kargs): - eta = D.eta + D.x - y_data = D.y_model + offset - plt.plot(eta / xscale, y_data, **kargs) - - ax.axvline(eta[0].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5) - ax.axvline(eta[-1].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5) - - -def add_info(D, Dk, ylims): - eta = D.eta + D.x - N_per_stancil, ksize = Dk.N_per_stancil.data, Dk.k.size - plt.text( - eta[0].data, - ylims[-1], - " N=" - + numtostr(N_per_stancil) - + " N/2M= " - + fltostr(N_per_stancil / 2 / ksize, 1), - ) - - -def plot_data_eta(D, offset=0, xscale=1e3, **kargs): - eta_1 = D.eta + D.x - y_data = D.y_model + offset - plt.plot(eta_1 / xscale, y_data, **kargs) - return eta_1 - - -def plot_data_eta(D, offset=0, **kargs): - eta_1 = D.eta # + D.x - y_data = D.y_model + offset - plt.plot(eta_1, y_data, **kargs) - return eta_1 - - -def plot_model_eta(D, ax, offset=0, **kargs): - eta = D.eta # + D.x - y_data = D.y_model + offset - plt.plot(eta, y_data, **kargs) - - ax.axvline(eta[0].data, linewidth=0.1, color=kargs["color"], alpha=0.5) - ax.axvline(eta[-1].data, linewidth=0.1, color=kargs["color"], alpha=0.5) - - -if "y_data" in Gx.sel(beam="gt3r").keys(): - print("ydata is ", ("y_data" in Gx.sel(beam="gt3r").keys())) -else: - print("ydata is ", ("y_data" in Gx.sel(beam="gt3r").keys())) - MT.json_save("B03_fail", plot_path, {"reason": "no y_data"}) - print("failed, exit") - exit() - -fltostr = 
MT.float_to_str -numtostr = MT.num_to_str - -font_for_print() - -MT.mkdirs_r(plot_path + "B03_spectra/") - -x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data.data)] -x_pos_max = ( - Gk.mean("beam") - .mean("k") - .gFT_PSD_data[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data)] - .argmax() - .data -) -xpp = x_pos_sel[[int(i) for i in np.round(np.linspace(0, x_pos_sel.size - 1, 4))]] -xpp = np.insert(xpp, 0, x_pos_max) - -for i in xpp: - F = M.figure_axis_xy(6, 8, container=True, view_scale=0.8) - - plt.suptitle( - "gFT Model and Spectrograms | x=" + str(Gk.x[i].data) + " \n" + track_name, - y=0.95, - ) - gs = GridSpec(5, 6, wspace=0.2, hspace=0.7) - - ax0 = F.fig.add_subplot(gs[0:2, :]) - col_d = color_schemes.__dict__["rels"] - - neven = True - offs = 0 - for k in all_beams: - Gx_1 = Gx.isel(x=i).sel(beam=k) - Gk_1 = Gk.isel(x=i).sel(beam=k) - - plot_model_eta( - Gx_1, - ax0, - offset=offs, - linestyle="-", - color=col_d[k], - linewidth=0.4, - alpha=1, - zorder=12, - ) - ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3 - - # oringial data - eta_1 = plot_data_eta( - Gx_1, offset=offs, linestyle="-", c="k", linewidth=1, alpha=0.5, zorder=11 - ) - - # reconstruct in gaps - FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k) - _ = FT.get_H() - FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff]) - plt.plot( - Gx_1.eta, - FT.model() + offs, - "-", - c="orange", - linewidth=0.3, - alpha=1, - zorder=2, - ) - - if neven: - neven = False - offs += 0.3 - else: - neven = True - offs += 0.6 - - dx = eta_1.diff("eta").mean().data - - eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) - - ax0.set_xticks(eta_ticks) - ax0.set_xticklabels(eta_ticks / 1e3) - plt.xlim(eta_1[0].data - 40 * dx, eta_1[-1].data + 40 * dx) - plt.title("Model reconst.", loc="left") - - plt.ylabel("relative slope (m/m)") - plt.xlabel( - "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km" - ) - - # spectra - # define threshold - k_thresh = 0.085 - ax1_list = list() - dd_max = list() - for pos, kgroup, lflag in zip( - [gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], - [["gt1l", "gt1r"], ["gt2l", "gt2r"], ["gt3l", "gt3r"]], - [True, False, False], - ): - ax11 = F.fig.add_subplot(pos) - ax11.tick_params(labelleft=lflag) - ax1_list.append(ax11) - for k in kgroup: - Gx_1 = Gx.isel(x=i).sel(beam=k) - Gk_1 = Gk.isel(x=i).sel(beam=k) - - klim = Gk_1.k[0], Gk_1.k[-1] - - if "l" in k: - dd = Gk_1.gFT_PSD_data - plt.plot(Gk_1.k, dd, color="gray", linewidth=0.5, alpha=0.5) - - dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods=1, center=True).mean() - plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=0.8) - dd_max.append(np.nanmax(dd.data)) - plt.xlim(klim) - - if lflag: - plt.ylabel("$(m/m)^2/k$") - plt.title("Energy Spectra", loc="left") - - plt.xlabel("wavenumber k (2$\pi$ m$^{-1}$)") - - ax11.axvline(k_thresh, linewidth=1, color="gray", alpha=1) - ax11.axvspan(k_thresh, klim[-1], color="gray", alpha=0.5, zorder=12) - - if ~np.isnan(np.nanmax(dd_max)): - for ax in ax1_list: - ax.set_ylim(0, np.nanmax(dd_max) * 1.1) - - ax0 = F.fig.add_subplot(gs[-2:, :]) - - neven = True - offs = 0 - for k in all_beams: - Gx_1 = Gx.isel(x=i).sel(beam=k) - Gk_1 = Gk.isel(x=i).sel(beam=k) - - ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3 - - # oringial data - eta_1 = plot_data_eta( - Gx_1, offset=offs, linestyle="-", c="k", linewidth=1.5, alpha=0.5, zorder=11 - ) - - # reconstruct in gaps - FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k) 
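# The low-wavenumber reconstruction in this block zeroes every model
# coefficient at k >= k_thresh before evaluating the model, so only the
# long-wavelength part of the surface survives. A self-contained sketch
# of that masking step with synthetic coefficients:
import numpy as np

k = np.linspace(0.01, 0.2, 40)
p_hat = np.random.default_rng(2).standard_normal(2 * k.size)  # [cos | sin]

k_thresh = 0.085                       # same threshold as in the script
p_hat_k = np.concatenate([k, k])       # wavenumber of each coefficient
p_hat_lowpass = np.where(p_hat_k < k_thresh, p_hat, 0.0)
print((p_hat_lowpass != 0).sum(), "of", p_hat.size, "coefficients kept")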
- _ = FT.get_H() - FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff]) - - p_hat_k = np.concatenate([Gk_1.k, Gk_1.k]) - k_mask = p_hat_k < k_thresh - FT.p_hat[~k_mask] = 0 - - plt.plot( - Gx_1.eta, - FT.model() + offs, - "-", - c=col_d[k], - linewidth=0.8, - alpha=1, - zorder=12, - ) - - if neven: - neven = False - offs += 0.3 - else: - neven = True - offs += 0.6 - - dx = eta_1.diff("eta").mean().data - - eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) - - ax0.set_xticks(eta_ticks) - ax0.set_xticklabels(eta_ticks / 1e3) - plt.xlim(eta_1[1000].data - 40 * dx, eta_1[-1000].data + 40 * dx) - plt.title("Low-Wavenumber Model reconst.", loc="left") - - plt.ylabel("relative slope (m/m)") - plt.xlabel( - "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km" - ) - - F.save_pup(path=plot_path + "B03_spectra/", name="B03_freq_reconst_x" + str(i)) - -MT.json_save( - "B03_success", plot_path, {"time": "time.asctime( time.localtime(time.time()) )"} -) From 027018167ef978cf5ed7b03a4745b32b63c2bc3d Mon Sep 17 00:00:00 2001 From: Camilo Diaz Date: Thu, 11 Jan 2024 11:01:06 -0500 Subject: [PATCH 04/12] added files and remove original file --- analysis_db/B03_plot_spectra_ov.py | 554 ----------------- .../analysis_db/B03_plot_spectra_ov.py | 581 ++++++++++++++++++ 2 files changed, 581 insertions(+), 554 deletions(-) delete mode 100644 analysis_db/B03_plot_spectra_ov.py create mode 100644 src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py diff --git a/analysis_db/B03_plot_spectra_ov.py b/analysis_db/B03_plot_spectra_ov.py deleted file mode 100644 index d03a7663..00000000 --- a/analysis_db/B03_plot_spectra_ov.py +++ /dev/null @@ -1,554 +0,0 @@ -# %% -import os, sys -#execfile(os.environ['PYTHONSTARTUP']) - -""" -This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. 
-This is python 3 -""" - -exec(open(os.environ['PYTHONSTARTUP']).read()) -exec(open(STARTUP_2021_IceSAT2).read()) - -#%matplotlib inline - -import ICEsat2_SI_tools.convert_GPS_time as cGPS -import h5py -import ICEsat2_SI_tools.io as io -import ICEsat2_SI_tools.spectral_estimates as spec - -import time -import imp -import copy -import spicke_remover -import datetime -import generalized_FT as gFT -from scipy.ndimage.measurements import label - -#import s3fs -# %% -track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment -#track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190208152826_06440210_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190213133330_07190212_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190207002436_06190212_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190206022433_06050212_004_01', 'SH_batch02', False - -#track_name, batch_key, test_flag = 'SH_20190101_00570212', 'SH_batch04', True -#track_name, batch_key, test_flag = 'SH_20190219_08070210', 'SH_batchminimal', True - - - -#track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False -#print(track_name, batch_key, test_flag) -hemis, batch = batch_key.split('_') - -load_path = mconfig['paths']['work'] +batch_key+'/B02_spectra/' -load_file = load_path + 'B02_' + track_name #+ '.nc' -plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/' -MT.mkdirs_r(plot_path) - -Gk = xr.open_dataset(load_file+'_gFT_k.nc') -Gx = xr.open_dataset(load_file+'_gFT_x.nc') - -Gfft = xr.open_dataset(load_file+'_FFT.nc') -# print(Gk) -# print(Gx) -time.sleep(2) - - -# %% -# for ibeam in Gk.beam: -# print(Gk.sel(beam=ibeam).gFT_PSD_data.data) - -# %% -all_beams = mconfig['beams']['all_beams'] -high_beams = mconfig['beams']['high_beams'] -low_beams = mconfig['beams']['low_beams'] -#Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data -#Gd = io.load_pandas_table_dict(track_name + '_B01_binned' , load_path) # -col.colormaps2(21) - -# %% check paths (again) - -col_dict= col.rels -F = M.figure_axis_xy(9, 3, view_scale =0.5) - -plt.subplot(1,3, 1) -plt.title(track_name , loc ='left') -for k in all_beams: - I = Gk.sel(beam=k) - I2 = Gx.sel(beam=k) - plt.plot(I['lon'], I['lat'], '.', c= col_dict[k], markersize = 0.7, linewidth =0.3) - plt.plot(I2['lon'], I2['lat'], '|', c= col_dict[k], markersize = 0.7 ) - - -plt.xlabel('lon') -plt.ylabel('lat') - -plt.subplot(1,3, 2) - -xscale= 1e3 -for k in all_beams: - I = Gk.sel(beam=k) - plt.plot( I['x_coord']/xscale , I['y_coord']/xscale, '.' , c= col_dict[k] , linewidth = 0.8, markersize = 0.8 ) - # I2 = G_gFT[k] - # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) - -plt.xlabel('x_coord (km)') -plt.ylabel('y_coord (km)') - -plt.subplot(1,3, 3) - -xscale= 1e3 -for k in all_beams: - I = Gk.sel(beam=k) - plt.plot( I['x_coord']/xscale , (I['y_coord']-I['y_coord'][0]), '.' 
, c= col_dict[k], linewidth = 0.8, markersize = 0.8) - # I2 = G_gFT[k] - # plt.plot( I2.coords['x_coord']/xscale, I2.coords['y_coord']/xscale, '*' , markersize = 0.7) - -plt.xlabel('x_coord (km)') -plt.ylabel('y_coord deviation (m)') - - -F.save_light(path=plot_path, name = 'B03_specs_coord_check') - - -# %% -def dict_weighted_mean(Gdict, weight_key): - """ - returns the weighted meean of a dict of xarray, data_arrays - weight_key must be in the xr.DataArrays - """ - #Gdict = G_rar_fft - #weight_key='N_per_stancil' - - akey = list( Gdict.keys() )[0] - GSUM = Gdict[akey].copy() - GSUM.data = np.zeros(GSUM.shape) - N_per_stancil = GSUM.N_per_stancil * 0 - N_photons = np.zeros(GSUM.N_per_stancil.size) - - counter= 0 - for k,I in Gdict.items(): - #print(k) - I =I.squeeze() - print(len(I.x) ) - if len(I.x) !=0: - GSUM += I.where( ~np.isnan(I), 0) * I[weight_key] #.sel(x=GSUM.x) - N_per_stancil += I[weight_key] - if 'N_photons' in GSUM.coords: - N_photons += I['N_photons'] - counter+=1 - - GSUM = GSUM / N_per_stancil - - if 'N_photons' in GSUM.coords: - GSUM.coords['N_photons'] = (('x', 'beam'), np.expand_dims(N_photons, 1) ) - - GSUM['beam'] = ['weighted_mean'] - GSUM.name='power_spec' - - return GSUM - - -G_gFT_wmean = (Gk['gFT_PSD_data'].where( ~np.isnan(Gk['gFT_PSD_data']), 0) * Gk['N_per_stancil']).sum('beam')/ Gk['N_per_stancil'].sum('beam') -G_gFT_wmean['N_per_stancil'] = Gk['N_per_stancil'].sum('beam') - -G_fft_wmean = (Gfft.where( ~np.isnan(Gfft), 0) * Gfft['N_per_stancil']).sum('beam')/ Gfft['N_per_stancil'].sum('beam') -G_fft_wmean['N_per_stancil'] = Gfft['N_per_stancil'].sum('beam') - - -# %% plot -def plot_wavenumber_spectrogram(ax, Gi, clev, title= None, plot_photon_density=True ): - - if Gi.k[0] ==0: - Gi= Gi.sel(k=Gi.k[1:]) - x_lambda= 2 * np.pi/Gi.k - plt.pcolormesh(Gi.x/1e3, x_lambda , Gi, cmap=plt.cm.ocean_r , vmin = clev[0], vmax = clev[-1]) - - ax.set_yscale('log') - # plt.colorbar(orientation='vertical', pad=0.06, label='Spectral Power (m^2/m)') - - if plot_photon_density: - - plt.plot(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10 , c='black', linewidth= 0.8, label='NAN-density' ) - plt.fill_between(Gi.x/1e3, x_lambda[-1] + (Gi.N_per_stancil/Gi.N_per_stancil.max() ) * 10, 0, color='gray', alpha = 0.3) - ax.axhline(30, color='black', linewidth=0.3) - - #plt.xlabel('Distance from the Ice Edge (km)') - plt.ylim(x_lambda[-1], x_lambda[0]) - plt.title(title, loc='left') - -#Gplot = G.rolling(x=5, min_periods= 1, center=True).mean() -#Gmean = G_gFT_wmean.rolling(x=2, min_periods= 1, center=True).mean() -Gmean = G_gFT_wmean.rolling(k=5, center=True).mean() -#Gmean = Gmean.where(~np.isnan(Gmean), 0) -try: - k_max_range = Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 5)).mean('x').argmax().data].data* 1.25 -except: - k_max_range = Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 0.75, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1, Gmean.k[Gmean.isel(x= slice(0, 20)).mean('x').argmax().data].data* 1.25 - - -# %% -font_for_print() -F = M.figure_axis_xy(6.5, 5.6, container= True, view_scale =1) -Lmeters = Gk.L.data[0] - -plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98) -gs = GridSpec(3,3, wspace=0.2, hspace=.5)#figure=fig, -#clev=np.arange(0, 6, 0.1)*3 - -#%matplotlib inline - -# define mean first for colorbar -Gplot = G_gFT_wmean.squeeze().rolling(k=10, min_periods= 1, 
center=True).median().rolling(x=3, min_periods= 1, center=True).median() -dd = 10 * np.log10(Gplot) -dd= dd.where(~np.isinf(dd), np.nan ) -clev_log = M.clevels( [dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31)* 1 - -#clev = M.clevels( [Gmean.quantile(0.6).data * 1e4, Gmean.quantile(0.99).data * 1e4], 31)/ 1e4 - -xlims= Gmean.x[0]/1e3, Gmean.x[-1]/1e3 - -k =high_beams[0] -for pos, k, pflag in zip([gs[0, 0],gs[0, 1],gs[0, 2] ], high_beams, [True, False, False] ): - ax0 = F.fig.add_subplot(pos) - Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean() - #Gplot.mean('x').plot() - dd2 = 10 * np.log10(Gplot) - dd2= dd2.where(~np.isinf(dd2), np.nan ) - plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k + ' unsmoothed', plot_photon_density=True ) - plt.xlim(xlims) - # - if pflag: - plt.ylabel('Wave length\n(meters)') - plt.legend() - -for pos, k, pflag in zip([gs[1, 0],gs[1, 1],gs[1, 2] ], low_beams, [True, False, False] ): - ax0 = F.fig.add_subplot(pos) - Gplot = Gk.sel(beam = k).gFT_PSD_data.squeeze()#.rolling(k=10, x=2, min_periods= 1, center=True).mean() - #Gplot.mean('x').plot() - dd2 = 10 * np.log10(Gplot) - dd2= dd2.where(~np.isinf(dd2), np.nan ) - plot_wavenumber_spectrogram(ax0, dd2, clev_log, title =k+ ' unsmoothed', plot_photon_density=True ) - plt.xlim(xlims) - # - if pflag: - plt.ylabel('Wave length\n(meters)') - plt.legend() - -ax0 = F.fig.add_subplot(gs[2, 0]) - -plot_wavenumber_spectrogram(ax0, dd, clev_log , title ='smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$', plot_photon_density= True) -plt.xlim(xlims) - -# plt.plot(Gplot.x/1e3, 10* nan_list +20 , c='black', label='NAN-density' ) -# ax0.axhline(30, color='black', linewidth=0.5) - -ax0.axhline(2* np.pi/k_max_range[0], color='red', linestyle= '--', linewidth= 0.5) -ax0.axhline(2* np.pi/k_max_range[1], color='red', linestyle= '-', linewidth= 0.5) -ax0.axhline(2* np.pi/k_max_range[2], color='red', linestyle= '--', linewidth= 0.5) - -if pflag: - plt.ylabel('Wave length\n(meters)') - plt.legend() - -pos = gs[2, 1] -ax0 = F.fig.add_subplot(pos) -plt.title('Photons density ($m^{-1}$)', loc='left') - -for k in all_beams: - I = Gk.sel(beam = k)['gFT_PSD_data'] - plt.plot(Gplot.x/1e3, I.N_photons/I.L.data, label=k, linewidth=0.8) -plt.plot(Gplot.x/1e3, G_gFT_wmean.N_per_stancil/3/I.L.data , c='black', label='ave Photons' , linewidth=0.8) -plt.xlim(xlims) -plt.xlabel('Distance from the Ice Edge (km)') - -pos = gs[2, 2] - -ax0 = F.fig.add_subplot(pos) -ax0.set_yscale('log') - -plt.title('Peak Spectal Power', loc='left') - -x0 = Gk.x[0].data -for k in all_beams: - I = Gk.sel(beam = k)['gFT_PSD_data'] - plt.scatter(I.x.data/1e3, I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k').data , s=0.5, marker='.', color='red', alpha= 0.3) - - I= Gfft.sel(beam = k)#.to_array() - #I= I[:,I.N_per_stancil >= I.N_per_stancil.max().data*0.9] - plt.scatter( (x0 +I.x.data)/1e3, I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , s=0.5, marker='.', c='blue', alpha= 0.3) - - -Gplot= G_fft_wmean.squeeze() -Gplot = Gplot.power_spec[:,Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data*0.9] -plt.plot( (x0 + Gplot.x)/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.', markersize=1.5 , c='blue', label= 'FFT') - -Gplot= G_gFT_wmean.squeeze() -plt.plot( Gplot.x/1e3, Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate('k') , '.' 
, markersize=1.5, c='red', label= 'gFT') - -plt.ylabel('1e-3 $(m)^2~m$') -plt.legend() -#plt.ylim(Gplot.min()*1.4, Gplot.max()*1.4 ) -#plt.xlim(xlims) - -F.save_light(path=plot_path, name = 'B03_specs_L'+str(Lmeters)) - -# %% -Gk.sel(beam = k).gFT_PSD_data.plot() - -# %% define simple routines -def plot_model_eta(D, ax, offset = 0, xscale= 1e3 , **kargs ): - eta = D.eta + D.x - y_data = D.y_model+offset - plt.plot(eta/xscale,y_data , **kargs) - - ax.axvline(eta[0].data/xscale , linewidth=2, color=kargs['color'], alpha=0.5) - ax.axvline(eta[-1].data/xscale, linewidth=2, color=kargs['color'], alpha=0.5) - -def add_info(D, Dk, ylims): - eta = D.eta + D.x - N_per_stancil, ksize = Dk.N_per_stancil.data , Dk.k.size - plt.text(eta[0].data, ylims[-1], ' N='+numtostr(N_per_stancil) + ' N/2M= '+ fltostr(N_per_stancil/2/ksize, 1) ) - -def plot_data_eta(D, offset = 0,xscale= 1e3 , **kargs ): - eta_1 = D.eta + D.x - y_data = D.y_model +offset - plt.plot(eta_1/xscale,y_data , **kargs) - return eta_1 - - -# %% phase examples -### overlapping views -#for i in np.arange(0,29,2): -# i = 4 -# c1= 'blue' -# c2= 'red' -# -# Gx_1 = Gx.isel(x= i).sel(beam = k) -# Gx_2 = Gx.isel(x= i+1).sel(beam = k) -# -# Gk_1 = Gk.isel(x= i).sel(beam = k) -# Gk_2 = Gk.isel(x= i+1).sel(beam = k) -# -# fltostr = MT.float_to_str -# numtostr = MT.num_to_str -# -# #if k%2 ==0: -# font_for_print() -# F = M.figure_axis_xy(9, 5, container =True, view_scale= 0.8) -# -# plt.suptitle('gFT Slope Spectrograms\n' + track_name, y = 0.98) -# gs = GridSpec(3,4, wspace=0.2, hspace=.5)#figure=fig, -# -# ax0 = F.fig.add_subplot(gs[0, :]) -# -# -# -# plot_model_eta(Gx_1, ax0, linestyle='-', color=c1, linewidth=0.4, alpha=1, zorder=12 ) -# plot_model_eta(Gx_2, ax0, linestyle='-', color=c2, linewidth=0.4, alpha=1, zorder=12 ) -# -# ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 -# -# add_info(Gx_1, Gk_1 , ylims ) -# add_info(Gx_2, Gk_1 , ylims ) -# -# # oringial data -# -# eta_1= plot_data_eta(Gx_1 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) -# eta_2= plot_data_eta(Gx_2 , offset= 0 , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) -# -# dx = eta_1.diff('eta').mean() -# plt.xlim(eta_1[0].data - 40 * dx, eta_2[-1].data + 40 * dx ) -# plt.ylim(ylims[0], ylims[-1]) -# - -# %% Single views - -def plot_data_eta(D, offset = 0 , **kargs ): - eta_1 = D.eta# + D.x - y_data = D.y_model +offset - plt.plot(eta_1,y_data , **kargs) - return eta_1 - -def plot_model_eta(D, ax, offset = 0, **kargs ): - eta = D.eta #+ D.x - y_data = D.y_model+offset - plt.plot(eta ,y_data , **kargs) - - ax.axvline(eta[0].data, linewidth=0.1, color=kargs['color'], alpha=0.5) - ax.axvline(eta[-1].data, linewidth=0.1, color=kargs['color'], alpha=0.5) - -if ('y_data' in Gx.sel(beam = 'gt3r').keys()): - print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) ) -else: - print('ydata is ', ('y_data' in Gx.sel(beam = 'gt3r').keys()) ) - MT.json_save('B03_fail', plot_path, {'reason':'no y_data'}) - print('failed, exit') - exit() - - -# %% -fltostr = MT.float_to_str -numtostr = MT.num_to_str - -font_for_print() - - -#for i in x_pos_sel[::2]: -#i =x_pos_sel[20] -MT.mkdirs_r(plot_path+'B03_spectra/') - -x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data.data)] -x_pos_max = Gk.mean('beam').mean('k').gFT_PSD_data[~np.isnan(Gk.mean('beam').mean('k').gFT_PSD_data)].argmax().data -xpp = x_pos_sel[ [int(i) for i in np.round(np.linspace(0, x_pos_sel.size-1, 4))]] -xpp = np.insert(xpp, 0, x_pos_max) - -for i in xpp: - - 
#i = xpp[0] - F = M.figure_axis_xy(6, 8, container =True, view_scale= 0.8) - - plt.suptitle('gFT Model and Spectrograms | x='+str(Gk.x[i].data)+' \n' + track_name, y = 0.95) - gs = GridSpec(5,6, wspace=0.2, hspace=0.7)#figure=fig, - - ax0 = F.fig.add_subplot(gs[0:2, :]) - col_d = col.__dict__['rels'] - - neven = True - offs = 0 - for k in all_beams: - - Gx_1 = Gx.isel(x= i).sel(beam = k) - Gk_1 = Gk.isel(x= i).sel(beam = k) - - plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 ) - ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 - #add_info(Gx_1, Gk_1 , ylims ) - - # oringial data - eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1, alpha =0.5, zorder=11) - - # reconstruct in gaps - FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k ) - _ = FT.get_H() - FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ]) - plt.plot(Gx_1.eta, FT.model()+offs ,'-', c='orange', linewidth=0.3, alpha=1,zorder= 2) - - if neven: - neven = False - offs += .3 - else: - neven = True - offs +=0.6 - - - dx = eta_1.diff('eta').mean().data - - eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) - - ax0.set_xticks(eta_ticks) - ax0.set_xticklabels(eta_ticks/1e3) - plt.xlim( eta_1[0].data - 40 * dx, eta_1[-1].data+ 40 * dx ) - plt.title('Model reconst.', loc ='left') - - - plt.ylabel('relative slope (m/m)') - plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km') - - - # spectra - # define threshold - k_thresh = 0.085 - ax1_list = list() - dd_max=list() - for pos, kgroup, lflag in zip([ gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], [['gt1l', 'gt1r'], ['gt2l', 'gt2r'], ['gt3l', 'gt3r']], [True, False, False] ): - - ax11 = F.fig.add_subplot(pos) - ax11.tick_params(labelleft=lflag) - ax1_list.append(ax11) - for k in kgroup: - - Gx_1 = Gx.isel(x= i).sel(beam = k) - Gk_1 = Gk.isel(x= i).sel(beam = k) - - klim= Gk_1.k[0], Gk_1.k[-1] - - if 'l' in k: - dd = Gk_1.gFT_PSD_data#.rolling(k=10, min_periods= 1, center=True).mean() - plt.plot(Gk_1.k, dd, color='gray', linewidth=.5 ,alpha= 0.5 ) - - dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods= 1, center=True).mean() - plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=.8 ) - dd_max.append(np.nanmax(dd.data)) - plt.xlim(klim) - - if lflag: - plt.ylabel('$(m/m)^2/k$') - plt.title('Energy Spectra', loc ='left') - - plt.xlabel('wavenumber k (2$\pi$ m$^{-1}$)') - - #plt.ylim(dd.min(), max(dd_max) * 1.1) - - ax11.axvline(k_thresh, linewidth=1, color='gray', alpha=1) - ax11.axvspan(k_thresh , klim[-1], color='gray', alpha=0.5, zorder=12) - - if ~np.isnan(np.nanmax(dd_max)): - for ax in ax1_list: - ax.set_ylim(0, np.nanmax(dd_max) * 1.1) - - ax0 = F.fig.add_subplot(gs[-2:, :]) - - neven = True - offs = 0 - for k in all_beams: - - Gx_1 = Gx.isel(x= i).sel(beam = k) - Gk_1 = Gk.isel(x= i).sel(beam = k) - - #plot_model_eta(Gx_1, ax0, offset= offs, linestyle='-', color=col_d[k], linewidth=0.4, alpha=1, zorder=12 ) - ylims= -np.nanstd(Gx_1.y_data)*3, np.nanstd(Gx_1.y_data)*3 - #add_info(Gx_1, Gk_1 , ylims ) - - # oringial data - eta_1= plot_data_eta(Gx_1 , offset= offs , linestyle= '-', c='k',linewidth=1.5, alpha =0.5, zorder=11) - - # reconstruct in gaps - FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None,Gk_1.k ) - _ = FT.get_H() - FT.p_hat=np.concatenate([ Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff ]) - - p_hat_k = np.concatenate([ Gk_1.k, Gk_1.k ]) - k_mask = p_hat_k < k_thresh - FT.p_hat[~k_mask] = 0 - - plt.plot(Gx_1.eta, FT.model()+offs ,'-', 
c=col_d[k], linewidth=0.8, alpha=1,zorder= 12) - - if neven: - neven = False - offs += .3 - else: - neven = True - offs +=0.6 - - dx = eta_1.diff('eta').mean().data - - eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) - - ax0.set_xticks(eta_ticks) - ax0.set_xticklabels(eta_ticks/1e3) - plt.xlim( eta_1[1000].data - 40 * dx, eta_1[-1000].data+ 40 * dx ) - plt.title('Low-Wavenumber Model reconst.', loc ='left') - - - plt.ylabel('relative slope (m/m)') - plt.xlabel('segment distance $\eta$ (km) @ x='+fltostr(Gx_1.x.data/1e3, 2)+'km') - - F.save_pup(path=plot_path+'B03_spectra/', name = 'B03_freq_reconst_x'+str(i)) - -MT.json_save('B03_success', plot_path, {'time':'time.asctime( time.localtime(time.time()) )'}) diff --git a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py new file mode 100644 index 00000000..94deba31 --- /dev/null +++ b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py @@ -0,0 +1,581 @@ +import os, sys + +""" +This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. +This is python 3 +""" + +from icesat2_tracks.config.IceSAT2_startup import ( + mconfig, + xr, + color_schemes, + plt, + np, + font_for_print, +) + +import icesat2_tracks.ICEsat2_SI_tools.io as io +import icesat2_tracks.ICEsat2_SI_tools.spectral_estimates as spec + +import time +from matplotlib.gridspec import GridSpec +import icesat2_tracks.ICEsat2_SI_tools.generalized_FT as gFT +from scipy.ndimage.measurements import label +import icesat2_tracks.local_modules.m_tools_ph3 as MT +from icesat2_tracks.local_modules import m_general_ph3 as M + + +track_name, batch_key, test_flag = io.init_from_input( + sys.argv +) # loads standard experiment +hemis, batch = batch_key.split("_") + +load_path = mconfig["paths"]["work"] + batch_key + "/B02_spectra/" +load_file = load_path + "B02_" + track_name # + '.nc' +plot_path = ( + mconfig["paths"]["plot"] + "/" + hemis + "/" + batch_key + "/" + track_name + "/" +) +MT.mkdirs_r(plot_path) + +Gk = xr.open_dataset(load_file + "_gFT_k.nc") +Gx = xr.open_dataset(load_file + "_gFT_x.nc") + +Gfft = xr.open_dataset(load_file + "_FFT.nc") +time.sleep(2) + + +all_beams = mconfig["beams"]["all_beams"] +high_beams = mconfig["beams"]["high_beams"] +low_beams = mconfig["beams"]["low_beams"] +color_schemes.colormaps2(21) + +col_dict = color_schemes.rels +F = M.figure_axis_xy(9, 3, view_scale=0.5) + +plt.subplot(1, 3, 1) +plt.title(track_name, loc="left") +for k in all_beams: + I = Gk.sel(beam=k) + I2 = Gx.sel(beam=k) + plt.plot(I["lon"], I["lat"], ".", c=col_dict[k], markersize=0.7, linewidth=0.3) + plt.plot(I2["lon"], I2["lat"], "|", c=col_dict[k], markersize=0.7) + + +plt.xlabel("lon") +plt.ylabel("lat") + +plt.subplot(1, 3, 2) + +xscale = 1e3 +for k in all_beams: + I = Gk.sel(beam=k) + plt.plot( + I["x_coord"] / xscale, + I["y_coord"] / xscale, + ".", + c=col_dict[k], + linewidth=0.8, + markersize=0.8, + ) + +plt.xlabel("x_coord (km)") +plt.ylabel("y_coord (km)") + +plt.subplot(1, 3, 3) + +xscale = 1e3 +for k in all_beams: + I = Gk.sel(beam=k) + plt.plot( + I["x_coord"] / xscale, + (I["y_coord"] - I["y_coord"][0]), + ".", + c=col_dict[k], + linewidth=0.8, + markersize=0.8, + ) + +plt.xlabel("x_coord (km)") +plt.ylabel("y_coord deviation (m)") + + +F.save_light(path=plot_path, name="B03_specs_coord_check") + + +def dict_weighted_mean(Gdict, weight_key): + """ + returns the weighted meean of a dict of xarray, data_arrays + weight_key must be 
in the xr.DataArrays
+    """
+
+    akey = list(Gdict.keys())[0]
+    GSUM = Gdict[akey].copy()
+    GSUM.data = np.zeros(GSUM.shape)
+    N_per_stancil = GSUM.N_per_stancil * 0
+    N_photons = np.zeros(GSUM.N_per_stancil.size)
+
+    counter = 0
+    for _, I in Gdict.items():
+        I = I.squeeze()
+        if len(I.x) != 0:
+            GSUM += I.where(~np.isnan(I), 0) * I[weight_key]  # .sel(x=GSUM.x)
+            N_per_stancil += I[weight_key]
+            if "N_photons" in GSUM.coords:
+                N_photons += I["N_photons"]
+            counter += 1
+
+    GSUM = GSUM / N_per_stancil
+
+    if "N_photons" in GSUM.coords:
+        GSUM.coords["N_photons"] = (("x", "beam"), np.expand_dims(N_photons, 1))
+
+    GSUM["beam"] = ["weighted_mean"]
+    GSUM.name = "power_spec"
+
+    return GSUM
+
+
+G_gFT_wmean = (
+    Gk["gFT_PSD_data"].where(~np.isnan(Gk["gFT_PSD_data"]), 0) * Gk["N_per_stancil"]
+).sum("beam") / Gk["N_per_stancil"].sum("beam")
+G_gFT_wmean["N_per_stancil"] = Gk["N_per_stancil"].sum("beam")
+
+G_fft_wmean = (Gfft.where(~np.isnan(Gfft), 0) * Gfft["N_per_stancil"]).sum(
+    "beam"
+) / Gfft["N_per_stancil"].sum("beam")
+G_fft_wmean["N_per_stancil"] = Gfft["N_per_stancil"].sum("beam")
+
+
+def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=True):
+    if Gi.k[0] == 0:
+        Gi = Gi.sel(k=Gi.k[1:])
+    x_lambda = 2 * np.pi / Gi.k
+    plt.pcolormesh(
+        Gi.x / 1e3, x_lambda, Gi, cmap=plt.cm.ocean_r, vmin=clev[0], vmax=clev[-1]
+    )
+
+    ax.set_yscale("log")
+
+    if plot_photon_density:
+        plt.plot(
+            Gi.x / 1e3,
+            x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10,
+            c="black",
+            linewidth=0.8,
+            label="NAN-density",
+        )
+        plt.fill_between(
+            Gi.x / 1e3,
+            x_lambda[-1] + (Gi.N_per_stancil / Gi.N_per_stancil.max()) * 10,
+            0,
+            color="gray",
+            alpha=0.3,
+        )
+        ax.axhline(30, color="black", linewidth=0.3)
+
+    plt.ylim(x_lambda[-1], x_lambda[0])
+    plt.title(title, loc="left")
+
+
+Gmean = G_gFT_wmean.rolling(k=5, center=True).mean()
+
+try:
+    k_max_range = (
+        Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 0.75,
+        Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1,
+        Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1.25,
+    )
+except:
+    k_max_range = (
+        Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 0.75,
+        Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1,
+        Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1.25,
+    )
+
+font_for_print()
+F = M.figure_axis_xy(6.5, 5.6, container=True, view_scale=1)
+Lmeters = Gk.L.data[0]
+
+plt.suptitle("gFT Slope Spectrograms\n" + track_name, y=0.98)
+gs = GridSpec(3, 3, wspace=0.2, hspace=0.5)
+
+Gplot = (
+    G_gFT_wmean.squeeze()
+    .rolling(k=10, min_periods=1, center=True)
+    .median()
+    .rolling(x=3, min_periods=1, center=True)
+    .median()
+)
+dd = 10 * np.log10(Gplot)
+dd = dd.where(~np.isinf(dd), np.nan)
+clev_log = M.clevels([dd.quantile(0.01).data, dd.quantile(0.98).data * 1.2], 31) * 1
+
+xlims = Gmean.x[0] / 1e3, Gmean.x[-1] / 1e3
+
+k = high_beams[0]
+for pos, k, pflag in zip(
+    [gs[0, 0], gs[0, 1], gs[0, 2]], high_beams, [True, False, False]
+):
+    ax0 = F.fig.add_subplot(pos)
+    Gplot = Gk.sel(beam=k).gFT_PSD_data.squeeze()
+    dd2 = 10 * np.log10(Gplot)
+    dd2 = dd2.where(~np.isinf(dd2), np.nan)
+    plot_wavenumber_spectrogram(
+        ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True
+    )
+    plt.xlim(xlims)
+    if pflag:
+        plt.ylabel("Wave length\n(meters)")
+        plt.legend()
+
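+# note: 10 * log10 maps zero power to -inf; masking -inf to NaN keeps the
+# quantile-based color levels and the pcolormesh limits finite. A sketch of
+# the pattern repeated in both beam loops, for a hypothetical psd DataArray:
+#   dd2 = 10 * np.log10(psd)
+#   dd2 = dd2.where(~np.isinf(dd2), np.nan)
+
+for pos, k, pflag in zip(
+    [gs[1, 0], gs[1, 1], gs[1, 2]], low_beams, [True, False, False]
+):
+    ax0 = F.fig.add_subplot(pos)
+    Gplot = 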
Gk.sel(beam=k).gFT_PSD_data.squeeze() + dd2 = 10 * np.log10(Gplot) + dd2 = dd2.where(~np.isinf(dd2), np.nan) + plot_wavenumber_spectrogram( + ax0, dd2, clev_log, title=k + " unsmoothed", plot_photon_density=True + ) + plt.xlim(xlims) + if pflag: + plt.ylabel("Wave length\n(meters)") + plt.legend() + +ax0 = F.fig.add_subplot(gs[2, 0]) + +plot_wavenumber_spectrogram( + ax0, + dd, + clev_log, + title="smoothed weighted mean \n10 $\log_{10}( (m/m)^2 m )$", + plot_photon_density=True, +) +plt.xlim(xlims) + +ax0.axhline(2 * np.pi / k_max_range[0], color="red", linestyle="--", linewidth=0.5) +ax0.axhline(2 * np.pi / k_max_range[1], color="red", linestyle="-", linewidth=0.5) +ax0.axhline(2 * np.pi / k_max_range[2], color="red", linestyle="--", linewidth=0.5) + +if pflag: + plt.ylabel("Wave length\n(meters)") + plt.legend() + +pos = gs[2, 1] +ax0 = F.fig.add_subplot(pos) +plt.title("Photons density ($m^{-1}$)", loc="left") + +for k in all_beams: + I = Gk.sel(beam=k)["gFT_PSD_data"] + plt.plot(Gplot.x / 1e3, I.N_photons / I.L.data, label=k, linewidth=0.8) +plt.plot( + Gplot.x / 1e3, + G_gFT_wmean.N_per_stancil / 3 / I.L.data, + c="black", + label="ave Photons", + linewidth=0.8, +) +plt.xlim(xlims) +plt.xlabel("Distance from the Ice Edge (km)") + +pos = gs[2, 2] + +ax0 = F.fig.add_subplot(pos) +ax0.set_yscale("log") + +plt.title("Peak Spectal Power", loc="left") + +x0 = Gk.x[0].data +for k in all_beams: + I = Gk.sel(beam=k)["gFT_PSD_data"] + plt.scatter( + I.x.data / 1e3, + I.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k").data, + s=0.5, + marker=".", + color="red", + alpha=0.3, + ) + I = Gfft.sel(beam=k) + plt.scatter( + (x0 + I.x.data) / 1e3, + I.power_spec.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), + s=0.5, + marker=".", + c="blue", + alpha=0.3, + ) + + +Gplot = G_fft_wmean.squeeze() +Gplot = Gplot.power_spec[:, Gplot.N_per_stancil >= Gplot.N_per_stancil.max().data * 0.9] +plt.plot( + (x0 + Gplot.x) / 1e3, + Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), + ".", + markersize=1.5, + c="blue", + label="FFT", +) + +Gplot = G_gFT_wmean.squeeze() +plt.plot( + Gplot.x / 1e3, + Gplot.sel(k=slice(k_max_range[0], k_max_range[2])).integrate("k"), + ".", + markersize=1.5, + c="red", + label="gFT", +) + +plt.ylabel("1e-3 $(m)^2~m$") +plt.legend() + +F.save_light(path=plot_path, name="B03_specs_L" + str(Lmeters)) + +Gk.sel(beam=k).gFT_PSD_data.plot() + + +def plot_model_eta(D, ax, offset=0, xscale=1e3, **kargs): + eta = D.eta + D.x + y_data = D.y_model + offset + plt.plot(eta / xscale, y_data, **kargs) + + ax.axvline(eta[0].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5) + ax.axvline(eta[-1].data / xscale, linewidth=2, color=kargs["color"], alpha=0.5) + + +def add_info(D, Dk, ylims): + eta = D.eta + D.x + N_per_stancil, ksize = Dk.N_per_stancil.data, Dk.k.size + plt.text( + eta[0].data, + ylims[-1], + " N=" + + numtostr(N_per_stancil) + + " N/2M= " + + fltostr(N_per_stancil / 2 / ksize, 1), + ) + + +def plot_data_eta(D, offset=0, xscale=1e3, **kargs): + eta_1 = D.eta + D.x + y_data = D.y_model + offset + plt.plot(eta_1 / xscale, y_data, **kargs) + return eta_1 + + +def plot_data_eta(D, offset=0, **kargs): + eta_1 = D.eta # + D.x + y_data = D.y_model + offset + plt.plot(eta_1, y_data, **kargs) + return eta_1 + + +def plot_model_eta(D, ax, offset=0, **kargs): + eta = D.eta # + D.x + y_data = D.y_model + offset + plt.plot(eta, y_data, **kargs) + + ax.axvline(eta[0].data, linewidth=0.1, color=kargs["color"], alpha=0.5) + ax.axvline(eta[-1].data, 
linewidth=0.1, color=kargs["color"], alpha=0.5) + + +if "y_data" in Gx.sel(beam="gt3r").keys(): + print("ydata is ", ("y_data" in Gx.sel(beam="gt3r").keys())) +else: + print("ydata is ", ("y_data" in Gx.sel(beam="gt3r").keys())) + MT.json_save("B03_fail", plot_path, {"reason": "no y_data"}) + print("failed, exit") + exit() + +fltostr = MT.float_to_str +numtostr = MT.num_to_str + +font_for_print() + +MT.mkdirs_r(plot_path + "B03_spectra/") + +x_pos_sel = np.arange(Gk.x.size)[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data.data)] +x_pos_max = ( + Gk.mean("beam") + .mean("k") + .gFT_PSD_data[~np.isnan(Gk.mean("beam").mean("k").gFT_PSD_data)] + .argmax() + .data +) +xpp = x_pos_sel[[int(i) for i in np.round(np.linspace(0, x_pos_sel.size - 1, 4))]] +xpp = np.insert(xpp, 0, x_pos_max) + +for i in xpp: + F = M.figure_axis_xy(6, 8, container=True, view_scale=0.8) + + plt.suptitle( + "gFT Model and Spectrograms | x=" + str(Gk.x[i].data) + " \n" + track_name, + y=0.95, + ) + gs = GridSpec(5, 6, wspace=0.2, hspace=0.7) + + ax0 = F.fig.add_subplot(gs[0:2, :]) + col_d = color_schemes.__dict__["rels"] + + neven = True + offs = 0 + for k in all_beams: + Gx_1 = Gx.isel(x=i).sel(beam=k) + Gk_1 = Gk.isel(x=i).sel(beam=k) + + plot_model_eta( + Gx_1, + ax0, + offset=offs, + linestyle="-", + color=col_d[k], + linewidth=0.4, + alpha=1, + zorder=12, + ) + ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3 + + # oringial data + eta_1 = plot_data_eta( + Gx_1, offset=offs, linestyle="-", c="k", linewidth=1, alpha=0.5, zorder=11 + ) + + # reconstruct in gaps + FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k) + _ = FT.get_H() + FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff]) + plt.plot( + Gx_1.eta, + FT.model() + offs, + "-", + c="orange", + linewidth=0.3, + alpha=1, + zorder=2, + ) + + if neven: + neven = False + offs += 0.3 + else: + neven = True + offs += 0.6 + + dx = eta_1.diff("eta").mean().data + + eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11) + + ax0.set_xticks(eta_ticks) + ax0.set_xticklabels(eta_ticks / 1e3) + plt.xlim(eta_1[0].data - 40 * dx, eta_1[-1].data + 40 * dx) + plt.title("Model reconst.", loc="left") + + plt.ylabel("relative slope (m/m)") + plt.xlabel( + "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km" + ) + + # spectra + # define threshold + k_thresh = 0.085 + ax1_list = list() + dd_max = list() + for pos, kgroup, lflag in zip( + [gs[2, 0:2], gs[2, 2:4], gs[2, 4:]], + [["gt1l", "gt1r"], ["gt2l", "gt2r"], ["gt3l", "gt3r"]], + [True, False, False], + ): + ax11 = F.fig.add_subplot(pos) + ax11.tick_params(labelleft=lflag) + ax1_list.append(ax11) + for k in kgroup: + Gx_1 = Gx.isel(x=i).sel(beam=k) + Gk_1 = Gk.isel(x=i).sel(beam=k) + + klim = Gk_1.k[0], Gk_1.k[-1] + + if "l" in k: + dd = Gk_1.gFT_PSD_data + plt.plot(Gk_1.k, dd, color="gray", linewidth=0.5, alpha=0.5) + + dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods=1, center=True).mean() + plt.plot(Gk_1.k, dd, color=col_d[k], linewidth=0.8) + dd_max.append(np.nanmax(dd.data)) + plt.xlim(klim) + + if lflag: + plt.ylabel("$(m/m)^2/k$") + plt.title("Energy Spectra", loc="left") + + plt.xlabel("wavenumber k (2$\pi$ m$^{-1}$)") + + ax11.axvline(k_thresh, linewidth=1, color="gray", alpha=1) + ax11.axvspan(k_thresh, klim[-1], color="gray", alpha=0.5, zorder=12) + + if ~np.isnan(np.nanmax(dd_max)): + for ax in ax1_list: + ax.set_ylim(0, np.nanmax(dd_max) * 1.1) + + ax0 = F.fig.add_subplot(gs[-2:, :]) + + neven = True + offs = 0 + for k in all_beams: + Gx_1 = 
Gx.isel(x=i).sel(beam=k)
+        Gk_1 = Gk.isel(x=i).sel(beam=k)
+
+        ylims = -np.nanstd(Gx_1.y_data) * 3, np.nanstd(Gx_1.y_data) * 3
+
+        # oringial data
+        eta_1 = plot_data_eta(
+            Gx_1, offset=offs, linestyle="-", c="k", linewidth=1.5, alpha=0.5, zorder=11
+        )
+
+        # reconstruct in gaps
+        FT = gFT.generalized_Fourier(Gx_1.eta + Gx_1.x, None, Gk_1.k)
+        _ = FT.get_H()
+        FT.p_hat = np.concatenate([Gk_1.gFT_cos_coeff, Gk_1.gFT_sin_coeff])
+
+        p_hat_k = np.concatenate([Gk_1.k, Gk_1.k])
+        k_mask = p_hat_k < k_thresh
+        FT.p_hat[~k_mask] = 0
+
+        plt.plot(
+            Gx_1.eta,
+            FT.model() + offs,
+            "-",
+            c=col_d[k],
+            linewidth=0.8,
+            alpha=1,
+            zorder=12,
+        )
+
+        if neven:
+            neven = False
+            offs += 0.3
+        else:
+            neven = True
+            offs += 0.6
+
+    dx = eta_1.diff("eta").mean().data
+
+    eta_ticks = np.linspace(Gx_1.eta.data[0], Gx_1.eta.data[-1], 11)
+
+    ax0.set_xticks(eta_ticks)
+    ax0.set_xticklabels(eta_ticks / 1e3)
+    plt.xlim(eta_1[1000].data - 40 * dx, eta_1[-1000].data + 40 * dx)
+    plt.title("Low-Wavenumber Model reconst.", loc="left")
+
+    plt.ylabel("relative slope (m/m)")
+    plt.xlabel(
+        "segment distance $\eta$ (km) @ x=" + fltostr(Gx_1.x.data / 1e3, 2) + "km"
+    )
+
+    F.save_pup(path=plot_path + "B03_spectra/", name="B03_freq_reconst_x" + str(i))
+
+MT.json_save(
+    "B03_success", plot_path, {"time": "time.asctime( time.localtime(time.time()) )"}
+)

From f34215d7413654b38a3deb618bc4279cd98fe1cf Mon Sep 17 00:00:00 2001
From: Camilo Diaz
Date: Thu, 11 Jan 2024 11:05:36 -0500
Subject: [PATCH 05/12] added CI on step 3

---
 .github/workflows/test-B01_SL_load_single_file.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/test-B01_SL_load_single_file.yml b/.github/workflows/test-B01_SL_load_single_file.yml
index 6dcdee90..2c798d7c 100644
--- a/.github/workflows/test-B01_SL_load_single_file.yml
+++ b/.github/workflows/test-B01_SL_load_single_file.yml
@@ -24,4 +24,6 @@ jobs:
       - name: first step B01_SL_load_single_file
         run: python src/icesat2_tracks/analysis_db/B01_SL_load_single_file.py 20190502052058_05180312_005_01 SH_testSLsinglefile2 True
       - name: second step make_spectra
-        run: python src/icesat2_tracks/analysis_db/B02_make_spectra_gFT.py SH_20190502_05180312 SH_testSLsinglefile2 True
\ No newline at end of file
+        run: python src/icesat2_tracks/analysis_db/B02_make_spectra_gFT.py SH_20190502_05180312 SH_testSLsinglefile2 True
+      - name: third step plot_spectra
+        run: python src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py SH_20190502_05180312 SH_testSLsinglefile2 True
\ No newline at end of file
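
Note: patch 05 above chains B03 onto the same single-granule CI test as B01 and B02. A minimal local equivalent of that three-step chain is sketched below; the script paths and positional arguments (granule ID or track name, batch key, test flag) are copied from the workflow, while the runner itself is only an illustration, not part of the repository:

    import subprocess

    steps = [
        ("src/icesat2_tracks/analysis_db/B01_SL_load_single_file.py", "20190502052058_05180312_005_01"),
        ("src/icesat2_tracks/analysis_db/B02_make_spectra_gFT.py", "SH_20190502_05180312"),
        ("src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py", "SH_20190502_05180312"),
    ]
    for script, track in steps:
        # run in order: each step reads the files written by the previous one
        subprocess.run(["python", script, track, "SH_testSLsinglefile2", "True"], check=True)

From ed627edb156cdecde8247515fbaf7d08a183ac18 Mon Sep 17 00:00:00 2001
From: Carlos Paniagua
Date: Thu, 11 Jan 2024 15:23:17 -0500
Subject: [PATCH 06/12] chore: clean up startup.py

---
 src/icesat2_tracks/config/IceSAT2_startup.py | 135 +------------------
 1 file changed, 7 insertions(+), 128 deletions(-)

diff --git a/src/icesat2_tracks/config/IceSAT2_startup.py b/src/icesat2_tracks/config/IceSAT2_startup.py
index df477bae..b14cc5b4 100644
--- a/src/icesat2_tracks/config/IceSAT2_startup.py
+++ b/src/icesat2_tracks/config/IceSAT2_startup.py
@@ -10,7 +10,6 @@
 #standart libraries:
 import numpy as np
 import matplotlib
-#matplotlib.use('Agg')
 import matplotlib.pyplot as plt
 import matplotlib.colors as colors
@@ -47,19 +46,10 @@
 SMALL_SIZE = 8
 MEDIUM_SIZE = 10
 BIGGER_SIZE = 12
-#csfont = {'fontname':'Comic Sans MS'}
-legend_properties = {'weight':'bold'}
-#font.family: sans-serif
-#font.sans-serif: Helvetica Neue
-#import matplotlib.font_manager as font_manager
-#font_dirs = 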
['/home/mhell/HelveticaNeue/', ] -#font_files = font_manager.findSystemFonts(fontpaths=font_dirs) -#font_list = font_manager.createFontList(font_files) -#font_manager.fontManager.ttflist.extend(font_list) +legend_properties = {'weight':'bold'} plt.rc('font', size=SMALL_SIZE, serif='Helvetica Neue', weight='normal') # controls default text sizes -#plt.rc('font', size=SMALL_SIZE, serif='DejaVu Sans', weight='light') plt.rc('text', usetex='false') plt.rc('axes', titlesize=MEDIUM_SIZE, labelweight='normal') # fontsize of the axes title plt.rc('axes', labelsize=SMALL_SIZE, labelweight='normal') #, family='bold') # fontsize of the x and y labels @@ -67,79 +57,13 @@ plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE, frameon=False) # legend fontsize plt.rc('figure', titlesize=MEDIUM_SIZE, titleweight='bold', autolayout=True) #, family='bold') # fontsize of the figure title - -#figure.autolayout : False -#matplotlib.rcParams['pdf.fonttype'] = 42 -#matplotlib.rcParams['ps.fonttype'] = 42 - - plt.rc('path', simplify=True) - -plt.rcParams['figure.figsize'] = (10, 8)#(20.0, 10.0) #inline +plt.rcParams['figure.figsize'] = (10, 8) plt.rcParams['pcolor.shading'] = 'auto' -#rcParams['pcolor.shading'] = 'auto' plt.rc('pcolor', shading = 'auto') - -### TICKS -# see http://matplotlib.org/api/axis_api.html#matplotlib.axis.Tick -#xtick.top : False # draw ticks on the top side -#xtick.bottom : True # draw ticks on the bottom side -#xtick.major.size : 3.5 # major tick size in points -#xtick.minor.size : 2 # minor tick size in points -#xtick.major.width : .8 # major tick width in points -#xtick.minor.width : 0.6 # minor tick width in points -#xtick.major.pad : 3.5 # distance to major tick label in points -#xtick.minor.pad : 3.4 # distance to the minor tick label in points -#xtick.color : k # color of the tick labels -#xtick.labelsize : medium # fontsize of the tick labels -#xtick.direction : out # direction: in, out, or inout -#xtick.minor.visible : False # visibility of minor ticks on x-axis -#xtick.major.top : True # draw x axis top major ticks -#xtick.major.bottom : True # draw x axis bottom major ticks -#xtick.minor.top : True # draw x axis top minor ticks -#xtick.minor.bottom : True # draw x axis bottom minor ticks - -#ytick.left : True # draw ticks on the left side -#ytick.right : False # draw ticks on the right side -#ytick.major.size : 3.5 # major tick size in points -#ytick.minor.size : 2 # minor tick size in points -#ytick.major.width : 0.8 # major tick width in points -#ytick.minor.width : 0.6 # minor tick width in points -#ytick.major.pad : 3.5 # distance to major tick label in points -#ytick.minor.pad : 3.4 # distance to the minor tick label in points -#ytick.color : k # color of the tick labels -#ytick.labelsize : medium # fontsize of the tick labels -#ytick.direction : out # direction: in, out, or inout -#ytick.minor.visible : False # visibility of minor ticks on y-axis -#ytick.major.left : True # draw y axis left major ticks -#ytick.major.right : True # draw y axis right major ticks -#ytick.minor.left : True # draw y axis left minor ticks -#ytick.minor.right : True # draw y axis right minor ticks - - plt.rc('xtick.major', size= 4, width=1 ) plt.rc('ytick.major', size= 3.8, width=1 ) - -#axes.facecolor : white # axes background color -#axes.edgecolor : black # axes edge color -#axes.linewidth : 0.8 # edge linewidth -#axes.grid : False # display grid or not -#axes.titlesize : large # fontsize of the axes title -#axes.titlepad : 6.0 # pad between 
axes and title in points -#axes.labelsize : medium # fontsize of the x any y labels -#axes.labelpad : 4.0 # space between label and axis -#axes.labelweight : normal # weight of the x and y labels -#axes.labelcolor : black - plt.rc('axes', labelsize= MEDIUM_SIZE, labelweight='normal') - - - - -# axes.spines.left : True # display axis spines -# axes.spines.bottom : True -# axes.spines.top : True -# axes.spines.right : True plt.rc('axes.spines', top= False, right=False ) @@ -148,20 +72,10 @@ def font_for_print(): SMALL_SIZE = 6 MEDIUM_SIZE = 8 - BIGGER_SIZE = 10 - #csfont = {'fontname':'Comic Sans MS'} - legend_properties = {'weight':'bold'} - #font.family: sans-serif - #font.sans-serif: Helvetica Neue - - #import matplotlib.font_manager as font_manager - #font_dirs = ['/home/mhell/HelveticaNeue/', ] - #font_files = font_manager.findSystemFonts(fontpaths=font_dirs) - #font_list = font_manager.createFontList(font_files) - #font_manager.fontManager.ttflist.extend(font_list) + BIGGER_SIZE = 10 # not used. CP + legend_properties = {'weight':'bold'} # not used. CP plt.rc('font', size=SMALL_SIZE, serif='Helvetica Neue', weight='normal') # controls default text sizes - #plt.rc('font', size=SMALL_SIZE, serif='DejaVu Sans', weight='light') plt.rc('text', usetex='false') plt.rc('axes', titlesize=MEDIUM_SIZE, labelweight='normal') # fontsize of the axes title plt.rc('axes', labelsize=SMALL_SIZE, labelweight='normal') #, family='bold') # fontsize of the x and y labels @@ -169,36 +83,17 @@ def font_for_print(): plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE, frameon=False) # legend fontsize plt.rc('figure', titlesize=MEDIUM_SIZE, titleweight='bold', autolayout=True) #, family='bold') # fontsize of the figure title - - #figure.autolayout : False - #matplotlib.rcParams['pdf.fonttype'] = 42 - #matplotlib.rcParams['ps.fonttype'] = 42 - - - #plt.rc('xtick.major', size= 4, width=1 ) - #plt.rc('ytick.major', size= 3.8, width=1 ) - - plt.rc('axes', labelsize= SMALL_SIZE, labelweight='normal') def font_for_pres(): SMALL_SIZE = 10 MEDIUM_SIZE = 12 - BIGGER_SIZE = 14 - #csfont = {'fontname':'Comic Sans MS'} - legend_properties = {'weight':'bold'} - #font.family: sans-serif - #font.sans-serif: Helvetica Neue - - #import matplotlib.font_manager as font_manager - #font_dirs = ['/home/mhell/HelveticaNeue/', ] - #font_files = font_manager.findSystemFonts(fontpaths=font_dirs) - #font_list = font_manager.createFontList(font_files) - #font_manager.fontManager.ttflist.extend(font_list) + BIGGER_SIZE = 14 # not used. CP + legend_properties = {'weight':'bold'} # not used. 
CP + plt.rc('font', size=SMALL_SIZE, serif='Helvetica Neue', weight='normal') # controls default text sizes - #plt.rc('font', size=SMALL_SIZE, serif='DejaVu Sans', weight='light') plt.rc('text', usetex='false') plt.rc('axes', titlesize=MEDIUM_SIZE, labelweight='normal') # fontsize of the axes title plt.rc('axes', labelsize=SMALL_SIZE, labelweight='normal') #, family='bold') # fontsize of the x and y labels @@ -207,20 +102,4 @@ def font_for_pres(): plt.rc('legend', fontsize=SMALL_SIZE, frameon=False) # legend fontsize plt.rc('figure', titlesize=MEDIUM_SIZE, titleweight='bold', autolayout=True) #, family='bold') # fontsize of the figure title - #figure.autolayout : False - #matplotlib.rcParams['pdf.fonttype'] = 42 - #matplotlib.rcParams['ps.fonttype'] = 42 - - - #plt.rc('xtick.major', size= 4, width=1 ) - #plt.rc('ytick.major', size= 3.8, width=1 ) - - plt.rc('axes', labelsize= SMALL_SIZE, labelweight='normal') - - - -# add project depenent libraries -#sys.path.append(config['paths']['local_script']) - - From 2066f657326fbd74460fc92c29ef2d772950a742 Mon Sep 17 00:00:00 2001 From: Camilo Diaz Date: Fri, 12 Jan 2024 09:21:06 -0500 Subject: [PATCH 07/12] cleaning and formating ICEsat2_SI_tools/generalized_FT.py --- .../ICEsat2_SI_tools/generalized_FT.py | 168 +++--------------- 1 file changed, 26 insertions(+), 142 deletions(-) diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py index a8579e80..ad0432a5 100644 --- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py +++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py @@ -31,7 +31,6 @@ def smooth_data_to_weight(dd, m=150): dd_fake[2 * m : -2 * m] = dd weight = lanczos.lanczos_filter_1d_wrapping(np.arange(dd_fake.size), dd_fake, m) - # weight= M.runningmean_wrap_around(dd_fake, m=m) weight = weight[2 * m : -2 * m] weight = weight / weight.max() @@ -55,12 +54,6 @@ def get_weights_from_data( x_model = np.arange(stancil[0], stancil[-1], dx) y_gridded = np.copy(x_model) * 0 y_gridded[x_pos] = y - # nan_mask =np.isnan(y_gridded) - - # def gaus(x, x_0, amp, sigma_g ): - # return amp* np.exp(-0.5 * ( (x-x_0)/sigma_g)**2) - # weight = data_weight.mean() * gaus(self.k, 0.05, 1 , 0.02)**(1/2) - # weight = weight *10+ weight.max()* 0.005 # add pemnalty floor # take FFT to get peaj parameters k_fft = np.fft.rfftfreq(x_model.size, d=dx) * 2 * np.pi @@ -73,7 +66,6 @@ def get_weights_from_data( pars = Spec_fft.set_parameters(flim=np.sqrt(9.81 * k[-1]) / 2 / np.pi) k_max = (pars["f_max"].value * 2 * np.pi) ** 2 / 9.81 - # print('k_max ', k_max) if method == "gaussian": # simple gaussian weight @@ -81,31 +73,25 @@ def gaus(x, x_0, amp, sigma_g): return amp * np.exp(-0.5 * ((x - x_0) / sigma_g) ** 2) weight = gaus(k, k_max, 1, 0.02) ** (1 / 2) - # weight = weight *1+ weight.max()* 0.1 # add pemnalty floor params = None elif method == "parametric": # JONSWAP weight f = np.sqrt(9.81 * k) / (2 * np.pi) - # weight = weight + weight.max()* 0.1 # add pemnalty floor - # optimzes paramteric function to data - # Spec_fft.data = Spec_fft.runningmean(Spec_fft.data , 10, tailcopy=True) - # Spec_fft.data[np.isnan(Spec_fft.data)] = 0 - weight = Spec_fft.create_weight(freq=f, plot_flag=False, max_nfev=max_nfev) if plot_flag: Spec_fft.fitter.params.pretty_print() params = Spec_fft.fitter.params - # weight = weight+ weight.max()* 0.05 # add pemnalty floor + else: raise ValueError(" 'method' must be either 'gaussian' or 'parametric' ") if plot_flag: import matplotlib.pyplot as plt - # 
plt.plot(k_fft[1:], Spec_fft.model_func(Spec_fft.freq, pars), 'b--' ) + plt.plot( k_fft[1:], Spec_fft.data, c="gray", label="FFT for Prior", linewidth=0.5 ) @@ -113,7 +99,7 @@ def gaus(x, x_0, amp, sigma_g): k, weight, zorder=12, c="black", label="Fitted model to FFT", linewidth=0.5 ) plt.xlim(k[0], k[-1]) - # plt.show() + # add pemnalty floor weight = weight + weight.max() * 0.1 @@ -147,17 +133,15 @@ def define_weights(stancil, prior, x, y, dx, k, max_nfev, plot_flag=False): weight, prior_pars = get_weights_from_data( x, y, dx, stancil, k, max_nfev, plot_flag=plot_flag, method="parametric" ) - # weight_name = "10 * $P_{init}$ from FFT" weight_name = "$P_{init}$ from FFT" elif ( type(prior) is tuple - ): # prior= (PSD_from_GFT, weight_used in inversion), this is all other first iteration - # combine old and new weights + ): weight = 0.2 * smooth_data_to_weight(prior[0]) + 0.8 * prior[1] - # weight_name = "10 * smth. $P_{i-1}$" + weight_name = "smth. $P_{i-1}$" prior_pars = {"alpha": None, "amp": None, "f_max": None, "gamma": None} - else: # prior = weight, this is all other iterations + else: weight = smooth_data_to_weight(prior) weight_name = "smth. from data" prior_pars = {"alpha": None, "amp": None, "f_max": None, "gamma": None} @@ -246,13 +230,9 @@ def cal_spectrogram( Lmeters, dk = self.Lmeters, self.dk Lpoints = self.Lpoints Lpoints_full = int(Lmeters / self.dx) - # win = self.win self.xlims = (np.round(X.min()), X.max()) if xlims is None else xlims # init Lomb scargle object with noise as nummy data () - # dy_fake= np.random.randn(len(dy))*0.001 if self.dy is not None else None - # self.LS = LombScargle(X[0:L] , np.random.randn(L)*0.001, fit_mean=True) - def calc_gFT_apply(stancil, prior): """ windows the data accoding to stencil and applies LS spectrogram @@ -263,7 +243,6 @@ def calc_gFT_apply(stancil, prior): import time ta = time.perf_counter() - # x = X[stancil[0]:stancil[-1]] x_mask = (stancil[0] <= X) & (X <= stancil[-1]) print(stancil[1]) @@ -271,8 +250,6 @@ def calc_gFT_apply(stancil, prior): if ( x.size / Lpoints < 0.1 ): # if there are not enough photos set results to nan - # return stancil[1], self.k*np.nan, np.fft.rfftfreq( int(self.Lpoints), d=self.dx)*np.nan, x.size - # return stancil[1], np.concatenate([self.k*np.nan , self.k*np.nan]), np.nan, np.nan, np.nan, x.size, False, False return { "stancil_center": stancil[1], "p_hat": np.concatenate([self.k * np.nan, self.k * np.nan]), @@ -290,7 +267,7 @@ def calc_gFT_apply(stancil, prior): y_var = y.var() FT = generalized_Fourier(x, y, self.k) - # H = FT.get_H() + if plot_flag: import matplotlib.pyplot as plt @@ -335,12 +312,11 @@ def calc_gFT_apply(stancil, prior): inverse_stats[k] = np.nan print("stats : ", time.perf_counter() - ta) - # Z = complex_represenation(p_hat, FT.M, Lpoints ) - + # multiply with the standard deviation of the data to get dimensions right PSD = power_from_model( p_hat, dk, self.k.size, x.size, Lpoints - ) # Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) + ) if self.k.size * 2 > x.size: col = "red" @@ -348,7 +324,6 @@ def calc_gFT_apply(stancil, prior): col = "blue" if plot_flag: - # PSD_nondim = power_from_model(p_hat , dk, self.k.size, x.size, Lpoints) #Z_to_power_gFT(p_hat, dk, x.size, Lpoints ) plt.plot(self.k, PSD, color=col, label="GFT fit", linewidth=0.5) plt.title( "non-dim Spectral Segment Models, 2M=" @@ -383,31 +358,21 @@ def calc_gFT_apply(stancil, prior): } return return_dict - # stancil[1], p_hat, - # inverse_stats, y_model_grid , - # y_data_grid, x.size, - # PSD, weight, - # 
inverse_stats['spec_adjust'] # % derive L2 stancil self.stancil_iter = spec.create_chunk_boundaries_unit_lengths( Lmeters, self.xlims, ov=self.ov, iter_flag=True ) - # stancil_iter = create_chunk_boundaries_unit_lengths(L, ( np.round(X.min()), X.max() ), ov= self.ov, iter_flag=True) - + # apply func to all stancils Spec_returns = list() - # form: PSD_from_GFT, weight_used in inversion prior = False, False for ss in copy.copy(self.stancil_iter): - # print(ss) - # prior= False, False - # prior step if prior[0] is False: # make NL fit of piors do not exist print("1st step with NL-fit") I_return = calc_gFT_apply(ss, prior=prior) - prior = I_return["PSD"], I_return["weight"] # I_return[6], I_return[7] + prior = I_return["PSD"], I_return["weight"] # 2nd step if prior[0] is False: @@ -415,9 +380,8 @@ def calc_gFT_apply(stancil, prior): else: print("2nd step use set priors:", type(prior[0]), type(prior[0])) I_return = calc_gFT_apply(ss, prior=prior) - prior = I_return["PSD"], I_return["weight"] # I_return[6], I_return[7] + prior = I_return["PSD"], I_return["weight"] - # print(I_return[6]) Spec_returns.append( dict( (k, I_return[k]) @@ -433,14 +397,7 @@ def calc_gFT_apply(stancil, prior): ) ) ) - # Spec_returns.append( [I_return[0],I_return[1],I_return[2],I_return[3],I_return[4],I_return[5]] ) - - # map_func = map if map_func is None else map_func - # print(map_func) - # Spec_returns = list(map_func( calc_gFT_apply, copy.copy(self.stancil_iter) )) - # # linear version - # Spec_returns = list(map( calc_spectrum_and_field_apply, copy.copy(self.stancil_iter) )) - + # unpack resutls of the mapping: GFT_model = dict() Z_model = dict() @@ -486,22 +443,17 @@ def calc_gFT_apply(stancil, prior): self.N_per_stancil = N_per_stancil chunk_positions = np.array(list(D_specs.keys())) - self.N_stancils = len(chunk_positions) # number of spectral realizatiobs + self.N_stancils = len(chunk_positions) # number of spectral realizations # repack data, create xarray # 1st LS spectal estimates - # G_power_data = dict() - # for xi,I in D_specs.items(): - # G_power_data[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_PSD_data') + G_power_data = make_xarray_from_dict( D_specs, "gFT_PSD_data", ["k"], {"k": self.k} ) G_power_data = xr.concat(G_power_data.values(), dim="x").T # .to_dataset() - # G_power_model = dict() - # for xi,I in D_specs_model.items(): - # G_power_model[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='gFT_PSD_model') G_power_model = make_xarray_from_dict( D_specs_model, "gFT_PSD_model", ["k"], {"k": self.k} ) @@ -511,20 +463,13 @@ def calc_gFT_apply(stancil, prior): self.G.name = "gFT_PSD_model" # 2nd FFT(Y_model) - # G_model_Z =dict() - # for xi,I in Z_model.items(): - # # if I.size < Y_model_k_fft.size: - # # I = np.insert(I, -1, I[-1]) - # G_model_Z[xi] = xr.DataArray(I, dims=['k'], coords={'k': self.k, 'x': xi } , name='Z_hat') G_model_Z = make_xarray_from_dict(Z_model, "Z_hat", ["k"], {"k": self.k}) - G_model_Z = xr.concat(G_model_Z.values(), dim="x").T # .to_dataset() + G_model_Z = xr.concat(G_model_Z.values(), dim="x").T # 3rd GFT_model_coeff_A = dict() GFT_model_coeff_B = dict() for xi, I in GFT_model.items(): - # if I.size < Y_model_k_fft.size: - # I = np.insert(I, -1, I[-1]) GFT_model_coeff_A[xi] = xr.DataArray( I[0], dims=["k"], coords={"k": self.k, "x": xi}, name="gFT_cos_coeff" ) @@ -534,29 +479,22 @@ def calc_gFT_apply(stancil, prior): GFT_model_coeff_A = xr.concat( GFT_model_coeff_A.values(), dim="x" - ).T # .to_dataset() + ).T 
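+    # note: GFT_model_coeff_A/B hold the per-stancil cosine and sine gFT
+    # coefficients; concatenating along "x" and transposing yields (k, x)
+    # arrays, matching the stacked p_hat = [cos_coeff, sin_coeff] layout
+    # used in the inversion.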
GFT_model_coeff_B = xr.concat( GFT_model_coeff_B.values(), dim="x" - ).T # .to_dataset() + ).T # add weights to the data weights_k = make_xarray_from_dict(weights, "weight", ["k"], {"k": self.k}) - weights_k = xr.concat(weights_k.values(), dim="x").T # .to_dataset() + weights_k = xr.concat(weights_k.values(), dim="x").T # 4th: model in real space - # y_model_eta =dict() - # y_data_eta =dict() - - # for xi in y_model.keys(): - # y_model_eta[xi] = xr.DataArray(y_model[xi], dims=['eta'], coords={'eta': eta, 'x': xi } , name="y_model") - # y_data_eta[xi] = xr.DataArray(y_data[xi] , dims=['eta'], coords={'eta': eta, 'x': xi } , name="y_data") - eta = np.arange(0, self.Lmeters + self.dx, self.dx) - self.Lmeters / 2 y_model_eta = make_xarray_from_dict(y_model, "y_model", ["eta"], {"eta": eta}) y_data_eta = make_xarray_from_dict(y_data, "y_data", ["eta"], {"eta": eta}) - y_model_eta = xr.concat(y_model_eta.values(), dim="x").T # .to_dataset() - y_data_eta = xr.concat(y_data_eta.values(), dim="x").T # .to_dataset() + y_model_eta = xr.concat(y_model_eta.values(), dim="x").T + y_data_eta = xr.concat(y_data_eta.values(), dim="x").T # merge wavenumber datasets self.GG = xr.merge( @@ -575,19 +513,13 @@ def calc_gFT_apply(stancil, prior): self.GG.coords["N_per_stancil"] = (("x"), N_per_stancil) self.GG.coords["spec_adjust"] = (("x"), Spec_adjust_per_stancil) - # self.GG.expand_dims(dim='eta') - # eta = np.arange(0, self.L + self.dx, self.dx) - self.L/2 - # self.GG.coords['eta'] = ( ('eta'), eta ) - # #self.GG['win'] = ( ('eta'), np.insert(self.win, -1, self.win[-1])) - # create dataframe with fitted parameters and derive y_model and errors # reduce to valid values PP2 = dict() for k, I in Pars.items(): if I is not np.nan: PP2[k] = I - # print(Pars) - # print(PP2) + keys = Pars[next(iter(PP2))].keys() keys_DF = list(set(keys) - set(["model_error_k", "model_error_x"])) params_dataframe = pd.DataFrame(index=keys_DF) @@ -612,7 +544,6 @@ def calc_gFT_apply(stancil, prior): ) sta, ste = xi - self.Lmeters / 2, xi + self.Lmeters / 2 - # x_mask= (sta <= X) & (X <= ste) x_pos = (np.round((X[(sta <= X) & (X <= ste)] - sta) / self.dx)).astype( "int" ) @@ -625,10 +556,9 @@ def calc_gFT_apply(stancil, prior): elif x_pos.size < I["model_error_x"].size: I["model_error_x"] = I["model_error_x"][ 0:-1 - ] # np.append(I['model_error_x'], I['model_error_x'][-1]) + ] print("adjust y") - # print(x_pos.size , I['model_error_x'].size) x_err[x_pos] = I["model_error_x"] model_error_x[xi] = xr.DataArray( x_err, @@ -663,7 +593,6 @@ def calc_gFT_apply(stancil, prior): model_error_x = xr.concat(model_error_x.values(), dim="x").T GG_x = xr.merge([y_model_eta, y_data_eta, model_error_x]) - # model_error_x return self.GG, GG_x, params_dataframe @@ -673,20 +602,13 @@ def calc_var(self): return self.dk * Gmean[~infmask].sum().data - # def parceval(self, add_attrs=True ): - # return wavenumber_spectrogram.parceval(self, add_attrs= add_attrs ) - def parceval(self, add_attrs=True, weight_data=False): "test Parceval theorem" import copy DATA = self.data - L = self.Lmeters X = self.x - # derive mean variances of stancils - # stancil_iter = create_chunk_boundaries_unit_lengths(L, self.xlims, ov= self.ov ) - def get_stancil_var_apply(stancil): from scipy.signal import detrend @@ -717,8 +639,6 @@ def get_stancil_var_apply(stancil): print("Parcevals Theorem:") print("variance of timeseries: ", DATA.var()) print("mean variance of stancils: ", stancil_weighted_variance) - # print('variance of weighted timeseries: ',self.phi.var() ) - # 
self.calc_var(self) print("variance of the optimzed windowed LS Spectrum: ", self.calc_var()) if add_attrs: @@ -746,7 +666,7 @@ def complex_represenation(p_hat, M, N_x_full): this returns a power spectral density with the same variance as the data without gaps. """ Z = p_hat[0:M] - p_hat[M:] * 1j - Z = Z * (N_x_full / 2 + 1) # this + Z = Z * (N_x_full / 2 + 1) return Z @@ -794,7 +714,7 @@ def power_from_model(p_hat, dk, M, N_x, N_x_full): """ Z = complex_represenation(p_hat, M, N_x_full) - spec, _ = Z_to_power_gFT(Z, dk, N_x, N_x_full) # use spec_incomplete + spec, _ = Z_to_power_gFT(Z, dk, N_x, N_x_full) # spectral density respesenting the incomplete data return spec @@ -812,11 +732,6 @@ def __init__(self, x, ydata, k): self.M = self.k.size # number of wavenumbers self.N = self.x.size # number of datapoints - # if self.non_dimensionalize: - # self.ydata_star = (self.ydata - self.ydata_mean)/self.ydata_std - # else: - # self.ydata_star = self.ydata - if ydata is not None: self.ydata_var = self.ydata.var() self.ydata_mean = self.ydata.mean() @@ -842,8 +757,6 @@ def define_problem(self, P_weight, R_data_uncertainty): """ self.H = self.get_H() - # self.P = np.diag(1/penalties) # penalty 2M x 2M - # self.R = np.diag( data_uncertainty) #Noise Prior N x N self.P_1d = np.concatenate([P_weight, P_weight]) # these are weights again .. self.R_1d = R_data_uncertainty @@ -856,18 +769,6 @@ def solve(self): self.p_hat = is also non-dimensional """ - # standard inversion - # H = self.H - # P = self.P - # R = self.R - # y = self.ydata - # - # Hess = (H.T @ inv(R) @ H ) + inv(P) - # Hess_inv = inv( Hess) - # p_hat = Hess_inv @ H.T @ inv(R) @ y - # - # self.Hess, self.Hess_inv, self.p_hat = Hess, Hess_inv, p_hat - # faster inversion H = self.H P_1d = self.P_1d @@ -884,14 +785,6 @@ def solve(self): return p_hat - # def model(self): - # return self.model_dimensional() - - # def model_dimensional(self): - # " returns the model in dimensional units" - # if 'p_hat' not in self.__dict__: - # raise ValueError('p_hat does not exist, please invert for model first') - # return self.model_nondimensional() * self.ydata_std + self.ydata_mean def model(self): "returns the model dimensional units" @@ -940,17 +833,12 @@ def parceval(self, dk, Nx_full): return pars def get_stats(self, dk, Nx_full, print_flag=False): - # model_error_k = np.diag(self.Hess_inv) - # model_error_real = ((self.H**2) @ self.Hess_inv).sum(1) residual = self.ydata - self.model() - - Lmeters = self.x[-1] - self.x[0] pars = { "data_var": self.ydata.var(), "model_var": self.model().var(), "residual_var": residual.var(), - #'normalized_residual' : residual.var() /self.R_1d.mean(), "model_error_k": np.diag(self.Hess_inv), "model_error_x": ((self.H**2) @ self.Hess_inv).sum(1), "var_sum": self.ydata.var() - self.model().var() - residual.var(), @@ -1018,8 +906,6 @@ def get_peak_pos(y, smooth=30): f_max = self.freq[get_peak_pos(abs(self.data), 30)] self.f_max = f_max - # p_smothed = self.runningmean(np.abs(self.Z ), 20, tailcopy=True) - # f_max = self.freq[p_smothed[~np.isnan(p_smothed)].argmax()] params.add("f_max", f_max, min=f_max * 0.2, max=f_max * 1.5, vary=True) params.add("amp", 0.05, min=0.0001, max=0.1, vary=True) params.add("gamma", 1, min=1, max=3.3, vary=False) @@ -1056,7 +942,6 @@ def optimize(self, fitting_args=None, method="dual_annealing", max_nfev=None): if fitting_args is None: fitting_args = (self.data, self.model_func, self.freq) - # fit_kws= {'maxfun': 1e7} fit_kws = {"maxfun": 1e5} self.weight_func = fitting_args[1] self.fitter = 
self.LM.minimize( @@ -1086,10 +971,9 @@ def runningmean(self, var, m, tailcopy=False): print("0 Dimension is smaller then averaging length") return rr = np.asarray(var) * np.nan - # print(type(rr)) + var_range = np.arange(m, int(s[0]) - m - 1, 1) for i in var_range[np.isfinite(var[m : int(s[0]) - m - 1])]: - # rm.append(var[i-m:i+m].mean()) rr[int(i)] = np.nanmean(var[i - m : i + m]) if tailcopy: rr[0:m] = rr[m + 1] From 51cd07f9e7454da650979ea1ac8043cf45320d86 Mon Sep 17 00:00:00 2001 From: Camilo Diaz Date: Fri, 12 Jan 2024 11:46:03 -0500 Subject: [PATCH 08/12] cleaning code, checking nan arrays to remove warnings, formatting file --- .../analysis_db/B03_plot_spectra_ov.py | 44 +++++++++---------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py index 94deba31..e69f6728 100644 --- a/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py +++ b/src/icesat2_tracks/analysis_db/B03_plot_spectra_ov.py @@ -1,9 +1,8 @@ -import os, sys - """ This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. This is python 3 """ +import sys from icesat2_tracks.config.IceSAT2_startup import ( mconfig, @@ -118,7 +117,7 @@ def dict_weighted_mean(Gdict, weight_key): for I in Gdict.items(): I = I.squeeze() if len(I.x) != 0: - GSUM += I.where(~np.isnan(I), 0) * I[weight_key] # .sel(x=GSUM.x) + GSUM += I.where(~np.isnan(I), 0) * I[weight_key] N_per_stancil += I[weight_key] if "N_photons" in GSUM.coords: N_photons += I["N_photons"] @@ -180,18 +179,11 @@ def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=Tr Gmean = G_gFT_wmean.rolling(k=5, center=True).mean() try: - k_max_range = ( - Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 0.75, - Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1, - Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data * 1.25, - ) + k_max = Gmean.k[Gmean.isel(x=slice(0, 5)).mean("x").argmax().data].data except: - k_max_range = ( - Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 0.75, - Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1, - Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data * 1.25, - ) + k_max = Gmean.k[Gmean.isel(x=slice(0, 20)).mean("x").argmax().data].data +k_max_range = (k_max * 0.75, k_max * 1, k_max * 1.25) font_for_print() F = M.figure_axis_xy(6.5, 5.6, container=True, view_scale=1) Lmeters = Gk.L.data[0] @@ -254,9 +246,9 @@ def plot_wavenumber_spectrogram(ax, Gi, clev, title=None, plot_photon_density=Tr ) plt.xlim(xlims) -ax0.axhline(2 * np.pi / k_max_range[0], color="red", linestyle="--", linewidth=0.5) -ax0.axhline(2 * np.pi / k_max_range[1], color="red", linestyle="-", linewidth=0.5) -ax0.axhline(2 * np.pi / k_max_range[2], color="red", linestyle="--", linewidth=0.5) +line_styles = ["--", "-", "--"] +for k_max, style in zip(k_max_range, line_styles): + ax0.axhline(2 * np.pi / k_max, color="red", linestyle=style, linewidth=0.5) if pflag: plt.ylabel("Wave length\n(meters)") @@ -390,8 +382,7 @@ def plot_model_eta(D, ax, offset=0, **kargs): print("failed, exit") exit() -fltostr = MT.float_to_str -numtostr = MT.num_to_str +fltostr, numtostr = MT.float_to_str, MT.num_to_str font_for_print() @@ -503,9 +494,13 @@ def plot_model_eta(D, ax, offset=0, **kargs): dd = Gk_1.gFT_PSD_data.rolling(k=10, min_periods=1, center=True).mean() plt.plot(Gk_1.k, dd, 
color=col_d[k], linewidth=0.8)
-    dd_max.append(np.nanmax(dd.data))
+    # handle the 'All-NaN slice encountered' warning
+    if np.all(np.isnan(dd.data)):
+        dd_max.append(np.nan)
+    else:
+        dd_max.append(np.nanmax(dd.data))
+
     plt.xlim(klim)
-
     if lflag:
         plt.ylabel("$(m/m)^2/k$")
         plt.title("Energy Spectra", loc="left")
@@ -514,10 +509,11 @@ def plot_model_eta(D, ax, offset=0, **kargs):
     ax11.axvline(k_thresh, linewidth=1, color="gray", alpha=1)
     ax11.axvspan(k_thresh, klim[-1], color="gray", alpha=0.5, zorder=12)
-
-    if ~np.isnan(np.nanmax(dd_max)):
-        for ax in ax1_list:
-            ax.set_ylim(0, np.nanmax(dd_max) * 1.1)
+
+    if not np.all(np.isnan(dd_max)):
+        max_val = np.nanmax(dd_max)
+        for ax in ax1_list:
+            ax.set_ylim(0, max_val * 1.1)


 ax0 = F.fig.add_subplot(gs[-2:, :])

From ee8f2330018849f3cd5220f95a72d3ae39f1976c Mon Sep 17 00:00:00 2001
From: Camilo Diaz
Date: Fri, 12 Jan 2024 15:34:18 -0500
Subject: [PATCH 09/12] removing code and formatting file

---
 src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
index ad0432a5..2ca2ca4d 100644
--- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
+++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
@@ -647,7 +647,7 @@ def get_stancil_var_apply(stancil):
         self.G.attrs["mean_variance_LS_pwelch_spectrum"] = self.calc_var()

     def mean_spectral_error(self, mask=None, confidence=0.95):
-        return wavenumber_spectrogram.mean_spectral_error(
+        return spec.wavenumber_spectrogram.mean_spectral_error(
             self, mask=mask, confidence=confidence
         )

@@ -863,7 +863,7 @@ def get_stats(self, dk, Nx_full, print_flag=False):
     return pars


-class get_prior_spec(object):
+class get_prior_spec:
     def __init__(self, freq, data):
         """ """
         import numpy as np

From afe459e0ead458b2cbd45a339f4824aacf0c2bc4 Mon Sep 17 00:00:00 2001
From: Carlos Paniagua
Date: Sun, 14 Jan 2024 22:02:33 -0500
Subject: [PATCH 10/12] chore: update class definitions

---
 analyis_publish/PB05_define_angle.py          | 198 ++----------------
 analyis_publish/PB05_define_angle_supl.py     | 170 ++-------------
 analysis/SB04_2d_wavefield_emulator.py        |  61 +-----
 analysis_db/B05_define_angle.py               |   2 +-
 .../ICEsat2_SI_tools/angle_optimizer.py       |   2 +-
 .../ICEsat2_SI_tools/generalized_FT.py        |   4 +-
 src/icesat2_tracks/ICEsat2_SI_tools/io.py     |   5 +-
 .../ICEsat2_SI_tools/spectral_estimates.py    |  10 +-
 .../X03_MCMC_surface_smapling.py              |   2 +-
 .../local_modules/m_colormanager_ph3.py       |   2 +-
 .../local_modules/m_general_ph3.py            |  16 +-
 .../local_modules/m_spectrum_ph3.py           |   8 +-
 12 files changed, 66 insertions(+), 414 deletions(-)

diff --git a/analyis_publish/PB05_define_angle.py b/analyis_publish/PB05_define_angle.py
index c3226674..fc75cebb 100644
--- a/analyis_publish/PB05_define_angle.py
+++ b/analyis_publish/PB05_define_angle.py
@@ -1,17 +1,13 @@
-# %%
-import os, sys
-#execfile(os.environ['PYTHONSTARTUP'])
-
 """
 This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file.
This is python 3 """ +import os, sys + exec(open(os.environ['PYTHONSTARTUP']).read()) exec(open(STARTUP_2021_IceSAT2).read()) -#%matplotlib inline - import ICEsat2_SI_tools.convert_GPS_time as cGPS import h5py import ICEsat2_SI_tools.io as io @@ -36,50 +32,27 @@ col.colormaps2(21) col_dict = col.rels -#import s3fs -# %% track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment -#track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190215184558_07530210_004_01', 'SH_batch02', False -# good track -#track_name, batch_key, test_flag = '20190502021224_05160312_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190502050734_05180310_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190216200800_07690212_004_01', 'SH_batch02', False - -#track_name, batch_key, test_flag = '20190213133330_07190212_004_01', 'SH_batch02', False # main text figure track_name, batch_key, test_flag = 'SH_20190502_05160312', 'SH_publish', False #suppl. figures: -#track_name, batch_key, test_flag = 'SH_20190219_08070210', 'SH_publish', False - -#print(track_name, batch_key, test_flag) hemis, batch = batch_key.split('_') -#track_name= '20190605061807_10380310_004_01' ATlevel= 'ATL03' -#plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/B05_angle/' plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/publish/' + track_name + '/' MT.mkdirs_r(plot_path) bad_track_path =mconfig['paths']['work'] +'bad_tracks/'+ batch_key+'/' -# %% all_beams = mconfig['beams']['all_beams'] high_beams = mconfig['beams']['high_beams'] low_beams = mconfig['beams']['low_beams'] beam_groups = mconfig['beams']['groups'] group_names = mconfig['beams']['group_names'] -#Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data - -# load_path = mconfig['paths']['work'] +'/B01_regrid_'+hemis+'/' -# G_binned = io.load_pandas_table_dict(track_name + '_B01_binned' , load_path) # load_path = mconfig['paths']['work'] +batch_key +'/B02_spectra/' Gk = xr.load_dataset(load_path+ '/B02_'+track_name + '_gFT_k.nc' ) # @@ -91,22 +64,6 @@ Prior = MT.load_pandas_table_dict('/A02_'+track_name, load_path)['priors_hindcast'] -# font_for_print() -# F = M.figure_axis_xy(5.5, 3, view_scale= 0.8) -# plt.suptitle(track_name) -# ax1 = plt.subplot(2, 1, 1) -# plt.title('Data in Beam', loc= 'left') -# -# xi =1 - -#data = Marginals.isel(x=xi).sel(beam_group= 'group1').marginals -# angle_mask = Marginals.angle[2:-2] -# -#data.T.plot(cmap= plt.cm.OrRd) - -# %% - - def derive_weights(weights): weights = (weights-weights.mean())/weights.std() weights = weights - weights.min() @@ -127,7 +84,6 @@ def weighted_means(data, weights, x_angle, color='k'): k = wi.k.data data_k = data.sel(k=k).squeeze() data_weight = (data_k * wi) - #plt.stairs(data_weight.sum('k')/ weight_norm , x_angle, linewidth=1 , color ='k') if data_k.k.size > 1: for k in data_k.k.data: plt.stairs(data_weight.sel(k=k) / weight_norm, x_angle, color ='gray', alpha =0.5) @@ -190,10 +146,6 @@ def weighted_means(data, weights, x_angle, color='k'): group_weight = Gweights.isel(x =xi) ax_list= dict() - #ax_sum = 
F.fig.add_subplot(gs[1, 0]) - # #ax_sum.tick_params(labelbottom=False) - # - # ax_list['sum'] = ax_sum data_collect = dict() for group, gpos in zip(Marginals.beam_group.data, [ gs[0, 0], gs[1, 0], gs[2, 0]] ): @@ -215,9 +167,7 @@ def weighted_means(data, weights, x_angle, color='k'): else: data_wmean = weighted_means(data, weights, x_angle, color= col_dict[group] ) plt.stairs(data_wmean , x_angle, color =col_dict[group], alpha =1) - # test if density is correct - # if np.round(np.trapz(data_wmean) * d_angle, 2) < 0.90: - # raise ValueError('weighted mean is not a density anymore') + if group == 'group1': t_string = group_names[group] +' pair' #group.replace('group', @@ -225,31 +175,18 @@ def weighted_means(data, weights, x_angle, color='k'): t_string = group_names[group]+' pair' #group.replace('group', +' ') plt.title(next(fn) + t_string, loc ='left') - #plt.sca(ax_sum) - - # if data_collect is None: - # data_collect = data_wmean - # else: data_collect[group] = data_wmean - #ax0.set_yscale('log') + data_collect = xr.concat(data_collect.values(), dim='beam_group') final_data = (group_weight * data_collect).sum('beam_group')/group_weight.sum('beam_group').data - # plt.sca(ax_sum) - # plt.stairs( final_data , x_angle, color = 'k', alpha =1, linewidth =0.8) - # ax_sum.set_xlabel('Angle (rad)') - # plt.title('Weighted mean over group & wavenumber', loc='left') - # get relevant priors for axx in ax_list.values(): axx.set_ylim(0, final_data.max() * 1.5) - #figureaxx.set_yscale('log') axx.set_xticks(xticks_pi) axx.set_xticklabels(xtick_labels_pi) axx.set_xlim(-np.pi/2, np.pi/2) - #ax_final.set_xticks(xticks_pi) - #ax_final.set_xticklabels(xtick_labels_pi) try: @@ -257,7 +194,6 @@ def weighted_means(data, weights, x_angle, color='k'): ax_list['group2'].set_ylabel('PDF') ax_list['group3'].set_ylabel('PDF') ax_list['group1'].tick_params(labelbottom=True) - #ax_list['group3'].set_xlabel('Angle (rad)') except: pass @@ -271,9 +207,6 @@ def weighted_means(data, weights, x_angle, color='k'): plt.stairs( final_data , x_angle, color = 'k', alpha =0.5, linewidth =0.8, zorder= 12) final_data_smth = lanczos.lanczos_filter_1d(x_angle,final_data, 0.1) - # - # for group in Marginals.beam_group.data: - # plt.stairs( data_collect.sel(beam_group= group) * group_weight.sel(beam_group= group) /group_weight.sum('beam_group').data, x_angle, color =col_dict[group], alpha =1) plt.plot(x_angle[0:-1], final_data_smth, color = 'black', linewidth= 0.8) @@ -299,9 +232,6 @@ def weighted_means(data, weights, x_angle, color='k'): Gpdf = xr.merge([M_final,M_final_smth]) Gpdf.weighted_angle_PDF_smth.plot() -#Gpdf.isel( x=slice(0, 3 )).weighted_angle_PDF_smth.mean('x') -#Gpdf.angle[Gpdf.mean('x').weighted_angle_PDF_smth.argmax()].data - Gpdf.mean('x').weighted_angle_PDF_smth.plot() best_guess_angle = Gpdf.angle[Gpdf.mean('x').weighted_angle_PDF_smth.argmax()].data @@ -309,19 +239,8 @@ def weighted_means(data, weights, x_angle, color='k'): best_guess_angle/np.pi Gpdf.mean('x').weighted_angle_PDF_smth.plot() -#Gpdf.weighted_angle_PDF.where(~np.isnan(Gpdf.weighted_angle_PDF),0 ).plot() - -# if len(Gpdf.x) < 2: -# print('not enough x data, exit') -# MT.json_save('B05_fail', plot_path+'../', {'time':time.asctime( time.localtime(time.time()) ) , 'reason': 'not enough x segments'}) -# print('exit()') -# exit() - - - -# %% -class plot_polarspectra(object): +class plot_polarspectra: def __init__(self,k, thetas, data, data_type='fraction' ,lims=None, verbose=False): """ @@ -334,22 +253,21 @@ def __init__(self,k, thetas, data, 
data_type='fraction' ,lims=None, verbose=Fal #self.sample_unit=sample_unit if sample_unit is not None else 'df' # decided on freq limit - self.lims= lims = [self.k.min(),self.k.max()] if lims is None else lims #1.0 /lims[1], 1.0/ lims[0] + self.lims= lims = [self.k.min(),self.k.max()] if lims is None else lims freq_sel_bool=M.cut_nparray(self.k, lims[0], lims[1] ) - self.min=np.round(np.nanmin(data[freq_sel_bool,:]), 2)#*0.5e-17 + self.min=np.round(np.nanmin(data[freq_sel_bool,:]), 2) self.max=np.round(np.nanmax(data[freq_sel_bool,:]), 2) if verbose: print(str(self.min), str(self.max) ) - self.klabels=np.linspace(self.min, self.max, 5) #np.arange(10, 100, 20) + self.klabels=np.linspace(self.min, self.max, 5) self.data_type=data_type if data_type == 'fraction': self.clevs=np.linspace(np.nanpercentile(dir_data.data, 1), np.ceil(self.max* 0.9), 21) elif data_type == 'energy': self.ctrs_min=self.min+self.min*.05 - #self.clevs=np.linspace(self.min, self.max, 21) self.clevs=np.linspace(self.min+self.min*.05, self.max*.60, 21) @@ -359,35 +277,27 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): """ if ax is None: ax = plt.subplot(111, polar=True) - #self.title = plt.suptitle(' Polar Spectrum', y=0.95, x=0.5 , horizontalalignment='center') else: ax=ax - ax.set_theta_direction(-1) #right turned postive + ax.set_theta_direction(-1) ax.set_theta_zero_location("W") grid=ax.grid(color='k', alpha=.5, linestyle='-', linewidth=.5) if self.data_type == 'fraction': - cm=plt.cm.RdYlBu_r #brewer2mpl.get_map( 'RdYlBu','Diverging', 4, reverse=True).mpl_colormap + cm=plt.cm.RdYlBu_r colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1)# ,cmap=cm)#, vmin=self.ctrs_min) elif self.data_type == 'energy': - cm=plt.cm.Paired#brewer2mpl.get_map( 'Paired','Qualitative', 8).mpl_colormap + cm=plt.cm.Paired cm.set_under='w' cm.set_bad='w' - colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1)#, vmin=self.ctrs_min) - #divider = make_axes_locatable(ax) - #cax = divider.append_axes("right", size="5%", pad=0.05) + colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1) self.colorax = colorax if cbar_flag: cbar = plt.colorbar(colorax, fraction=0.046, pad=0.1, orientation="horizontal") - # if self.data_type == 'fraction': - # cbar.set_label('Energy Distribution', rotation=0, fontsize=fontsize) - # elif self.data_type == 'energy': - # cbar.set_label('Energy Density ('+self.unit+')', rotation=0, fontsize=fontsize) cbar.ax.get_yaxis().labelpad = 30 cbar.outline.set_visible(False) - #cbar.ticks. 
clev_tick_names, clev_ticks =MT.tick_formatter(FP.clevs, expt_flag= False, shift= 0, rounder=4, interval=1) cbar.set_ticks(clev_ticks[::5]) cbar.set_ticklabels(clev_tick_names[::5]) @@ -399,7 +309,6 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): radial_ticks = np.arange(60, 1000, 20) print(radial_ticks) xx_tick_names, xx_ticks = MT.tick_formatter( radial_ticks , expt_flag= False, shift= 1, rounder=0, interval=1) - #xx_tick_names, xx_ticks = MT.tick_formatter( np.arange( np.floor(self.k.min()),self.k.max(), 20) , expt_flag= False, shift= 1, rounder=0, interval=1) xx_tick_names = [' '+str(d)+'m' for d in xx_tick_names] ax.set_yticks(xx_ticks[::1]) @@ -415,26 +324,22 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): lines, labels = plt.thetagrids(degrange, labels=degrange_label)#, frac = 1.07) for line in lines: - #L=line.get_xgridlines line.set_linewidth(5) - #line.set_linestyle(':') - #ax.set_yscale('log') ax.set_ylim(self.lims) ax.spines['polar'].set_color("none") ax.set_rlabel_position(87) self.ax=ax -# %% font_for_print() fn = copy.copy(lstrings) F = M.figure_axis_xy(fig_sizes['two_column_square'][0], fig_sizes['two_column_square'][1], view_scale= 0.7, container = True) -gs = GridSpec(8,6, wspace=0.1, hspace=2.1)#figure=fig, +gs = GridSpec(8,6, wspace=0.1, hspace=2.1) col.colormaps2(21) -cmap_spec= col.white_base_blgror #plt.cm.ocean_r +cmap_spec= col.white_base_blgror clev_spec = np.linspace(-8, -1, 21) *10 cmap_angle= col.cascade_r @@ -452,26 +357,18 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): k_low = (k_low_limits + k_low_limits.diff('k')[0]/2).data weighted_spec_sub['k_bins'] = k_low[0:-1] weighted_spec_sub = weighted_spec_sub.rename({'k_bins': 'k'}) -#weighted_spec_sub = weighted_spec lam_p = 2 *np.pi/k_low_limits lam = lam_p * np.cos(best_guess_angle) k = 2 * np.pi/lam - -#weighted_spec.k/np.cos(best_guess_angle) - -#xlims = x_spec[0]-12.5/2, x_spec[-5] xlims = x_spec[0], x_spec[-5] -#weighted_spec.plot() -#clev_spec = np.linspace(-8, -1, 21) *10 clev_spec = np.linspace(-80, (10* np.log(weighted_spec)).max() * 0.9, 21) dd = 10* np.log(weighted_spec_sub) clev_log = M.clevels( [dd.quantile(0.01).data * 0.3, dd.quantile(0.98).data * 2.5], 31)* 1 -#plt.pcolor(x_spec, k, dd ,vmin= clev_spec[0], vmax= clev_spec[-1], cmap =cmap_spec ) plt.pcolormesh(x_spec, lam, dd, cmap=cmap_spec , vmin = clev_log[0], vmax = clev_log[-1]) @@ -486,19 +383,14 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): cbar = plt.colorbar( fraction=0.018, pad=0.01, orientation="vertical", label ='Power') cbar.outline.set_visible(False) clev_ticks = np.round(clev_spec[::3], 0) -#clev_tick_names, clev_ticks =MT.tick_formatter(clev_spec, expt_flag= False, shift= 0, rounder=1, interval=2) cbar.set_ticks(clev_ticks) cbar.set_ticklabels(clev_ticks) plt.ylabel('corrected wavelength $(m)$') -#plt.xlabel('x (km)') - -#plt.colorbar() ax2 = F.fig.add_subplot(gs[3:5, :]) ax2.tick_params(labelleft=True) -#Gpdf.weighted_angle_PDF.where(~np.isnan(Gpdf.weighted_angle_PDF),0 ).T.plot() -dir_data = Gpdf.interp(x= weighted_spec.x).weighted_angle_PDF_smth.T#.rolling(angle=5, min_periods= 1, center=True).mean() +dir_data = Gpdf.interp(x= weighted_spec.x).weighted_angle_PDF_smth.T x = Gpdf.x/1e3 angle = Gpdf.angle[::10] @@ -508,24 +400,16 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): dir_data_sub['angle_bins'] = angle_low[0:-1] dir_data_sub = dir_data_sub.rename({'angle_bins': 'angle'}) plt.pcolormesh(dir_data_sub.x/1e3, 
dir_data_sub.angle, dir_data_sub , vmin= clev_angle[0], vmax= clev_angle[-1], cmap = cmap_spec) - -#plt.pcolormesh(dir_data.x/1e3, dir_data.angle, dir_data , vmin= clev_angle[0], vmax= clev_angle[-1], cmap = cmap_spec) - - cbar = plt.colorbar( fraction=0.02, pad=0.01, orientation="vertical", label ='Density') cbar.outline.set_visible(False) plt.title(next(fn) + 'Direction PDFs', loc='left') - - plt.ylabel('Angle') plt.xlabel('X (km)') - ax2.set_yticks(xticks_pi) ax2.set_yticklabels(xtick_labels_pi) ax2.set_ylim(angle[0], angle[-1]) - x_ticks = np.arange(0, xlims[-1].data, 25) x_tick_labels, x_ticks = MT.tick_formatter(x_ticks, expt_flag= False, shift= 0, rounder=1, interval=2) @@ -534,32 +418,19 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): ax1.set_xticklabels(x_tick_labels) ax2.set_xticklabels(x_tick_labels) -#ax1.set_yscale('log') lam_lim= lam[-1].data, 550 ax1.set_ylim(lam_lim) ax1.set_xlim(xlims) ax2.set_xlim(xlims) -#ax2.set_yscale('log') ax2.axhline(best_guess_angle, color=col.orange, linewidth=0.8) - -#xx_list = np.insert(weighted_spec.x.data, 0, 0) -# x_pos_list = spec.create_chunk_boundaries( 1, xx_list.size, iter_flag= False ) -# #x_pos_list = spec.create_chunk_boundaries( int(xx_list.size/3), xx_list.size, iter_flag= False ) -# x_pos_list = x_pos_list[:, ::2] -# x_pos_list[-1, -1] = xx_list.size-1re -#x_pos_list#.shape - -x_pos_list = [0, 1, 2]#np.arange(0,9, 1)#np.vstack([np.arange(1,3), np.arange(0,3)+1]) -#x_pos_list +x_pos_list = [0, 1, 2] lsrtrings = iter(['c)', 'd)', 'e)']) dir_ax_list =list() for x_pos, gs in zip( x_pos_list , [ gs[-3:, 0:2], gs[-3:, 2:4], gs[-3:, 4:]] ): - #print( x_pos) - #print( xx_list[x_pos]) x_range = weighted_spec.x.data[x_pos] + 12.5e3/2 #, x_pos[-1]]] print(x_range) ax1.axvline(x_range/1e3, linestyle= '-', color= col.green, linewidth=0.9, alpha = 0.8) @@ -567,37 +438,17 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): i_lstring = next(lsrtrings) ax1.text(x_range/1e3, np.array(lam_lim).mean()* 1.2 * 3/2, ' '+ i_lstring, fontsize= 8, color =col.green) - #ax2.text(x_range/1e3, weighted_spec.k.mean().data, ' a', fontsize= 8) - - - # ax1.axvline(x_range[-1]/1e3, color = 'gray', alpha = 0.5) - # - # ax2.axvline(x_range[0]/1e3, linestyle= ':', color= 'white', alpha = 0.5) - # ax2.axvline(x_range[-1]/1e3, color = 'gray', alpha = 0.5) - # i_spec = weighted_spec.sel(x= slice(x_range[0], x_range[-1]) ) - # i_dir = corrected_marginals.sel(x= slice(x_range[0], x_range[-1]) ) i_spec = weighted_spec.isel(x= x_pos ) i_dir = corrected_marginals.interp(x= weighted_spec.x).isel(x= x_pos ) print(i_spec.x.data, i_spec.x.data) dir_data = (i_dir * i_dir.N_data).sum([ 'beam_group'])/ i_dir.N_data.sum([ 'beam_group']) lims = dir_data.k[ (dir_data.sum('angle')!=0) ][0].data, dir_data.k[ (dir_data.sum('angle')!=0) ][-1].data - #dir_data.plot() - #dir_data.rolling(angle =5, min_periods= 1, center=True ).mean().plot() - N_angle = i_dir.angle.size - dir_data2 = dir_data#.where( dir_data.sum('angle') !=0, 1/N_angle/d_angle ) - - plot_data = dir_data2 * i_spec#.mean('x') - - # angle_low = dir_data2.angle[::5] - # k_low = dir_data2.k[::5] - # plot_data = dir_data2.groupby_bins('angle' , angle_low).mean().groupby_bins('k', k_low).mean() - # plot_data = plot_data.rename({'k_bins':'k', 'angle_bins': 'angle'}) - # plot_data['k'] = (k_low + k_low.diff('k')[0]/2).data[0:-1] - # plot_data['angle'] =(angle_low + angle_low.diff('angle')[0]/2).data[0:-1] + dir_data2 = dir_data + plot_data = dir_data2 * i_spec plot_data = dir_data2.rolling(angle =2, 
k =15, min_periods= 1, center=True ).median() * i_spec#.mean('x') plot_data = plot_data.sel(k=slice(lims[0],lims[-1] ) ) @@ -605,16 +456,12 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): lam_p = 2 *np.pi/plot_data.k.data lam = lam_p * np.cos(best_guess_angle) - #F = M.figure_axis_xy(5, 4) - #ax = plt.subplot(1, 1, 1, polar=True) - # if np.nanmax(plot_data.data) != np.nanmin(plot_data.data): ax3 = F.fig.add_subplot(gs, polar=True) FP= plot_polarspectra(lam, plot_data.angle, plot_data, lims=[lam[-1], 138 ] , verbose= False, data_type= 'fraction') FP.clevs=np.linspace(np.nanpercentile(plot_data.data, 1), np.round(plot_data.max(), 4), 21) FP.linear(ax = ax3, cbar_flag=False) - #plt.show() plt.title('\n\n'+i_lstring,y=1.0, pad=-6, color=col.green) dir_ax_list.append(ax3) @@ -630,11 +477,7 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): cbar.set_label('Energy Density \n(10$^3$ (m/m)$^2$ k$^{-1}$ deg$^{-1}$ )', rotation=90)#, fontsize=10) -# F.save_pup(path = plot_path, name = 'B05_dir_ov_'+track_name) -# F.save_light(path = plot_path, name = 'B05_dir_ov_'+track_name) - - -# %% shift simple +# shift simple font_for_print() fn = copy.copy(lstrings) @@ -658,6 +501,3 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): plt.ylabel('$m^2/\lambda$') F.save_pup(path = plot_path, name = 'B05_dir_ov_'+track_name+'_1d') -# %% -#F.save_pup(path = plot_path, name = 'B05_dir_ov_'+track_name) -# MT.json_save('B05_success', plot_path + '../', {'time':time.asctime( time.localtime(time.time()) )}) diff --git a/analyis_publish/PB05_define_angle_supl.py b/analyis_publish/PB05_define_angle_supl.py index 315248dc..7f0880ae 100644 --- a/analyis_publish/PB05_define_angle_supl.py +++ b/analyis_publish/PB05_define_angle_supl.py @@ -36,51 +36,23 @@ col.colormaps2(21) col_dict = col.rels -#import s3fs -# %% track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment -#track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False -#track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190215184558_07530210_004_01', 'SH_batch02', False - -# good track -#track_name, batch_key, test_flag = '20190502021224_05160312_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190502050734_05180310_004_01', 'SH_batch02', False -#track_name, batch_key, test_flag = '20190216200800_07690212_004_01', 'SH_batch02', False - -#track_name, batch_key, test_flag = '20190213133330_07190212_004_01', 'SH_batch02', False - -# main text figure -#track_name, batch_key, test_flag = 'SH_20190502_05160312', 'SH_publish', False - -#suppl. 
figures: -#track_name, batch_key, test_flag = 'SH_20190219_08070210', 'SH_publish', False track_name, batch_key, test_flag = 'SH_20190224_08800210', 'SH_publish', False -#print(track_name, batch_key, test_flag) hemis, batch = batch_key.split('_') -#track_name= '20190605061807_10380310_004_01' ATlevel= 'ATL03' -#plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/B05_angle/' plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/publish/' + track_name + '/' MT.mkdirs_r(plot_path) bad_track_path =mconfig['paths']['work'] +'bad_tracks/'+ batch_key+'/' -# %% all_beams = mconfig['beams']['all_beams'] high_beams = mconfig['beams']['high_beams'] low_beams = mconfig['beams']['low_beams'] beam_groups = mconfig['beams']['groups'] group_names = mconfig['beams']['group_names'] -#Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data - -# load_path = mconfig['paths']['work'] +'/B01_regrid_'+hemis+'/' -# G_binned = io.load_pandas_table_dict(track_name + '_B01_binned' , load_path) # load_path = mconfig['paths']['work'] +batch_key +'/B02_spectra/' Gk = xr.load_dataset(load_path+ '/B02_'+track_name + '_gFT_k.nc' ) # @@ -92,22 +64,6 @@ Prior = MT.load_pandas_table_dict('/A02_'+track_name, load_path)['priors_hindcast'] -# font_for_print() -# F = M.figure_axis_xy(5.5, 3, view_scale= 0.8) -# plt.suptitle(track_name) -# ax1 = plt.subplot(2, 1, 1) -# plt.title('Data in Beam', loc= 'left') -# -# xi =1 - -#data = Marginals.isel(x=xi).sel(beam_group= 'group1').marginals -# angle_mask = Marginals.angle[2:-2] -# -#data.T.plot(cmap= plt.cm.OrRd) - -# %% - - def derive_weights(weights): weights = (weights-weights.mean())/weights.std() weights = weights - weights.min() @@ -128,7 +84,6 @@ def weighted_means(data, weights, x_angle, color='k'): k = wi.k.data data_k = data.sel(k=k).squeeze() data_weight = (data_k * wi) - #plt.stairs(data_weight.sum('k')/ weight_norm , x_angle, linewidth=1 , color ='k') if data_k.k.size > 1: for k in data_k.k.data: plt.stairs(data_weight.sel(k=k) / weight_norm, x_angle, color ='gray', alpha =0.5) @@ -187,14 +142,9 @@ def weighted_means(data, weights, x_angle, color='k'): tname = track_name.split('_')[1]+'\non '+ track_name.split('_')[0][0:8] plt.suptitle('Weighted marginal PDFs for \n$X_i$='+ x_str +' km for track '+tname, y= 1.03, x = 0.125, horizontalalignment= 'left') - #plt.suptitle('Weighted marginal PDFs\nx='+ x_str +'\n'+track_name, y= 1.05, x = 0.125, horizontalalignment= 'left') group_weight = Gweights.isel(x =xi) ax_list= dict() - #ax_sum = F.fig.add_subplot(gs[1, 0]) - # #ax_sum.tick_params(labelbottom=False) - # - # ax_list['sum'] = ax_sum data_collect = dict() for group, gpos in zip(Marginals.beam_group.data, [ gs[0, 0], gs[1, 0], gs[2, 0]] ): @@ -216,41 +166,24 @@ def weighted_means(data, weights, x_angle, color='k'): else: data_wmean = weighted_means(data, weights, x_angle, color= col_dict[group] ) plt.stairs(data_wmean , x_angle, color =col_dict[group], alpha =1) - # test if density is correct - # if np.round(np.trapz(data_wmean) * d_angle, 2) < 0.90: - # raise ValueError('weighted mean is not a density anymore') if group == 'group1': - t_string = group_names[group] +' pair' #group.replace('group', + t_string = group_names[group] +' pair' else: - t_string = group_names[group]+' pair' #group.replace('group', +' ') + t_string = group_names[group]+' pair' plt.title(next(fn) + t_string, loc ='left') - #plt.sca(ax_sum) - - # if data_collect is None: - # data_collect = data_wmean - # else: 
data_collect[group] = data_wmean - #ax0.set_yscale('log') data_collect = xr.concat(data_collect.values(), dim='beam_group') final_data = (group_weight * data_collect).sum('beam_group')/group_weight.sum('beam_group').data - # plt.sca(ax_sum) - # plt.stairs( final_data , x_angle, color = 'k', alpha =1, linewidth =0.8) - # ax_sum.set_xlabel('Angle (rad)') - # plt.title('Weighted mean over group & wavenumber', loc='left') - # get relevant priors for axx in ax_list.values(): axx.set_ylim(0, final_data.max() * 1.5) - #figureaxx.set_yscale('log') axx.set_xticks(xticks_pi) axx.set_xticklabels(xtick_labels_pi) axx.set_xlim(-np.pi/2, np.pi/2) - #ax_final.set_xticks(xticks_pi) - #ax_final.set_xticklabels(xtick_labels_pi) try: @@ -258,7 +191,6 @@ def weighted_means(data, weights, x_angle, color='k'): ax_list['group2'].set_ylabel('PDF') ax_list['group3'].set_ylabel('PDF') ax_list['group1'].tick_params(labelbottom=True) - #ax_list['group3'].set_xlabel('Angle (rad)') except: pass @@ -272,9 +204,6 @@ def weighted_means(data, weights, x_angle, color='k'): plt.stairs( final_data , x_angle, color = 'k', alpha =0.5, linewidth =0.8, zorder= 12) final_data_smth = lanczos.lanczos_filter_1d(x_angle,final_data, 0.1) - # - # for group in Marginals.beam_group.data: - # plt.stairs( data_collect.sel(beam_group= group) * group_weight.sel(beam_group= group) /group_weight.sum('beam_group').data, x_angle, color =col_dict[group], alpha =1) plt.plot(x_angle[0:-1], final_data_smth, color = 'black', linewidth= 0.8) @@ -300,8 +229,6 @@ def weighted_means(data, weights, x_angle, color='k'): Gpdf = xr.merge([M_final,M_final_smth]) Gpdf.weighted_angle_PDF_smth.plot() -#Gpdf.isel( x=slice(0, 3 )).weighted_angle_PDF_smth.mean('x') -#Gpdf.angle[Gpdf.mean('x').weighted_angle_PDF_smth.argmax()].data Gpdf.mean('x').weighted_angle_PDF_smth.plot() best_guess_angle = Gpdf.angle[Gpdf.mean('x').weighted_angle_PDF_smth.argmax()].data @@ -310,17 +237,8 @@ def weighted_means(data, weights, x_angle, color='k'): best_guess_angle/np.pi Gpdf.mean('x').weighted_angle_PDF_smth.plot() -#Gpdf.weighted_angle_PDF.where(~np.isnan(Gpdf.weighted_angle_PDF),0 ).plot() - -# if len(Gpdf.x) < 2: -# print('not enough x data, exit') -# MT.json_save('B05_fail', plot_path+'../', {'time':time.asctime( time.localtime(time.time()) ) , 'reason': 'not enough x segments'}) -# print('exit()') -# exit() - -# %% -class plot_polarspectra(object): +class plot_polarspectra: def __init__(self,k, thetas, data, data_type='fraction' ,lims=None, verbose=False): """ @@ -330,9 +248,6 @@ def __init__(self,k, thetas, data, data_type='fraction' ,lims=None, verbose=Fal self.k =k self.data =data self.thetas =thetas - - #self.sample_unit=sample_unit if sample_unit is not None else 'df' - # decided on freq limit self.lims= lims = [self.k.min(),self.k.max()] if lims is None else lims #1.0 /lims[1], 1.0/ lims[0] freq_sel_bool=M.cut_nparray(self.k, lims[0], lims[1] ) @@ -348,7 +263,6 @@ def __init__(self,k, thetas, data, data_type='fraction' ,lims=None, verbose=Fal self.clevs=np.linspace(np.nanpercentile(dir_data.data, 1), np.ceil(self.max* 0.9), 21) elif data_type == 'energy': self.ctrs_min=self.min+self.min*.05 - #self.clevs=np.linspace(self.min, self.max, 21) self.clevs=np.linspace(self.min+self.min*.05, self.max*.60, 21) @@ -358,7 +272,6 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): """ if ax is None: ax = plt.subplot(111, polar=True) - #self.title = plt.suptitle(' Polar Spectrum', y=0.95, x=0.5 , horizontalalignment='center') else: ax=ax ax.set_theta_direction(-1) 
#right turned postive @@ -367,25 +280,18 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): grid=ax.grid(color='k', alpha=.5, linestyle='-', linewidth=.5) if self.data_type == 'fraction': - cm=plt.cm.RdYlBu_r #brewer2mpl.get_map( 'RdYlBu','Diverging', 4, reverse=True).mpl_colormap - colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1)# ,cmap=cm)#, vmin=self.ctrs_min) + cm=plt.cm.RdYlBu_r + colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1) elif self.data_type == 'energy': - cm=plt.cm.Paired#brewer2mpl.get_map( 'Paired','Qualitative', 8).mpl_colormap + cm=plt.cm.Paired cm.set_under='w' cm.set_bad='w' - colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1)#, vmin=self.ctrs_min) - #divider = make_axes_locatable(ax) - #cax = divider.append_axes("right", size="5%", pad=0.05) + colorax = ax.contourf(self.thetas,self.k, self.data, self.clevs, cmap=cm, zorder=1) if cbar_flag: cbar = plt.colorbar(colorax, fraction=0.046, pad=0.1, orientation="horizontal") - # if self.data_type == 'fraction': - # cbar.set_label('Energy Distribution', rotation=0, fontsize=fontsize) - # elif self.data_type == 'energy': - # cbar.set_label('Energy Density ('+self.unit+')', rotation=0, fontsize=fontsize) cbar.ax.get_yaxis().labelpad = 30 cbar.outline.set_visible(False) - #cbar.ticks. clev_tick_names, clev_ticks =MT.tick_formatter(FP.clevs, expt_flag= False, shift= 0, rounder=4, interval=1) cbar.set_ticks(clev_ticks[::5]) cbar.set_ticklabels(clev_tick_names[::5]) @@ -397,7 +303,6 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): radial_ticks = np.arange(100, 1000, 50) print(radial_ticks) xx_tick_names, xx_ticks = MT.tick_formatter( radial_ticks , expt_flag= False, shift= 1, rounder=0, interval=1) - #xx_tick_names, xx_ticks = MT.tick_formatter( np.arange( np.floor(self.k.min()),self.k.max(), 20) , expt_flag= False, shift= 1, rounder=0, interval=1) xx_tick_names = [' '+str(d)+'m' for d in xx_tick_names] ax.set_yticks(xx_ticks[::1]) @@ -413,26 +318,22 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): lines, labels = plt.thetagrids(degrange, labels=degrange_label)#, frac = 1.07) for line in lines: - #L=line.get_xgridlines line.set_linewidth(5) - #line.set_linestyle(':') - #ax.set_yscale('log') ax.set_ylim(self.lims) ax.spines['polar'].set_color("none") ax.set_rlabel_position(87) self.ax=ax -# %% font_for_print() fn = copy.copy(lstrings) F = M.figure_axis_xy(fig_sizes['two_column_square'][0], fig_sizes['two_column_square'][1], view_scale= 0.7, container = True) -gs = GridSpec(8,6, wspace=0.1, hspace=2.1)#figure=fig, +gs = GridSpec(8,6, wspace=0.1, hspace=2.1) col.colormaps2(21) -cmap_spec= col.white_base_blgror #plt.cm.ocean_r +cmap_spec= col.white_base_blgror clev_spec = np.linspace(-8, -1, 21) *10 cmap_angle= col.cascade_r @@ -448,16 +349,12 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): lam_p = 2 *np.pi/weighted_spec.k lam = lam_p * np.cos(best_guess_angle) k = 2 * np.pi/lam -#weighted_spec.k/np.cos(best_guess_angle) xlims = x_spec[0]-12.5/2, x_spec[-5] -#weighted_spec.plot() -#clev_spec = np.linspace(-8, -1, 21) *10 clev_spec = np.linspace(-80, (10* np.log(weighted_spec)).max() * 0.9, 21) dd = 10* np.log(weighted_spec.rolling(k=10, min_periods= 1, center=True).mean()) clev_log = M.clevels( [dd.quantile(0.01).data * 0.3, dd.quantile(0.98).data * 1.5], 31)* 1 -#plt.pcolor(x_spec, k, dd ,vmin= clev_spec[0], vmax= clev_spec[-1], cmap =cmap_spec ) 
plt.pcolormesh(x_spec, lam, dd, cmap=cmap_spec , vmin = clev_log[0], vmax = clev_log[-1]) @@ -473,19 +370,14 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): cbar = plt.colorbar( fraction=0.018, pad=0.01, orientation="vertical", label ='Power') cbar.outline.set_visible(False) clev_ticks = np.round(clev_spec[::3], 0) -#clev_tick_names, clev_ticks =MT.tick_formatter(clev_spec, expt_flag= False, shift= 0, rounder=1, interval=2) cbar.set_ticks(clev_ticks) cbar.set_ticklabels(clev_ticks) plt.ylabel('corrected wavelength $(m)$') -#plt.xlabel('x (km)') - -#plt.colorbar() ax2 = F.fig.add_subplot(gs[3:5, :]) ax2.tick_params(labelleft=True) -#Gpdf.weighted_angle_PDF.where(~np.isnan(Gpdf.weighted_angle_PDF),0 ).T.plot() -dir_data = Gpdf.interp(x= weighted_spec.x).weighted_angle_PDF_smth.T#.rolling(angle=5, min_periods= 1, center=True).mean() +dir_data = Gpdf.interp(x= weighted_spec.x).weighted_angle_PDF_smth.T x = Gpdf.x/1e3 angle = Gpdf.angle @@ -513,81 +405,49 @@ def linear(self, radial_axis='period', ax=None, cbar_flag=True): ax1.set_xticklabels(x_tick_labels) ax2.set_xticklabels(x_tick_labels) -#ax1.set_yscale('log') lam_lim= lam[-1].data, lam[10].data ax1.set_ylim(lam_lim) ax1.set_xlim(xlims) ax2.set_xlim(xlims) -#ax2.set_yscale('log') ax2.axhline(best_guess_angle, color=col.orange, linewidth=0.8) -#xx_list = np.insert(weighted_spec.x.data, 0, 0) -# x_pos_list = spec.create_chunk_boundaries( 1, xx_list.size, iter_flag= False ) -# #x_pos_list = spec.create_chunk_boundaries( int(xx_list.size/3), xx_list.size, iter_flag= False ) -# x_pos_list = x_pos_list[:, ::2] -# x_pos_list[-1, -1] = xx_list.size-1re -#x_pos_list#.shape - -x_pos_list = [0, 1, 2]#np.arange(0,9, 1)#np.vstack([np.arange(1,3), np.arange(0,3)+1]) +x_pos_list = [0, 1, 2] #x_pos_list lsrtrings = iter(['c)', 'd)', 'e)']) for x_pos, gs in zip( x_pos_list , [ gs[-3:, 0:2], gs[-3:, 2:4], gs[-3:, 4:]] ): - #print( x_pos) - #print( xx_list[x_pos]) - x_range = weighted_spec.x.data[x_pos]#, x_pos[-1]]] + x_range = weighted_spec.x.data[x_pos] print(x_range) ax1.axvline(x_range/1e3, linestyle= '-', color= col.green, linewidth=0.9, alpha = 0.8) ax2.axvline(x_range/1e3, linestyle= '-', color= col.green, linewidth=0.9, alpha = 0.8) i_lstring = next(lsrtrings) ax1.text(x_range/1e3, np.array(lam_lim).mean()*3/2, ' '+ i_lstring, fontsize= 8, color =col.green) - #ax2.text(x_range/1e3, weighted_spec.k.mean().data, ' a', fontsize= 8) - - - # ax1.axvline(x_range[-1]/1e3, color = 'gray', alpha = 0.5) - # - # ax2.axvline(x_range[0]/1e3, linestyle= ':', color= 'white', alpha = 0.5) - # ax2.axvline(x_range[-1]/1e3, color = 'gray', alpha = 0.5) - - # i_spec = weighted_spec.sel(x= slice(x_range[0], x_range[-1]) ) - # i_dir = corrected_marginals.sel(x= slice(x_range[0], x_range[-1]) ) i_spec = weighted_spec.isel(x= x_pos ) i_dir = corrected_marginals.interp(x= weighted_spec.x).isel(x= x_pos ) print(i_spec.x.data, i_spec.x.data) dir_data = (i_dir * i_dir.N_data).sum([ 'beam_group'])/ i_dir.N_data.sum([ 'beam_group']) lims = dir_data.k[ (dir_data.sum('angle')!=0) ][0].data, dir_data.k[ (dir_data.sum('angle')!=0) ][-1].data - #dir_data.plot() - #dir_data.rolling(angle =5, min_periods= 1, center=True ).mean().plot() - N_angle = i_dir.angle.size - dir_data2 = dir_data#.where( dir_data.sum('angle') !=0, 1/N_angle/d_angle ) + dir_data2 = dir_data - plot_data = dir_data2 * i_spec#.mean('x') - plot_data = dir_data2.rolling(angle =2, k =15, min_periods= 1, center=True ).mean() * i_spec#.mean('x') + plot_data = dir_data2 * i_spec + plot_data = 
dir_data2.rolling(angle =2, k =15, min_periods= 1, center=True ).mean() * i_spec plot_data = plot_data.sel(k=slice(lims[0],lims[-1] ) ) xx = 2 * np.pi/plot_data.k - #F = M.figure_axis_xy(5, 4) - #ax = plt.subplot(1, 1, 1, polar=True) - # if np.nanmax(plot_data.data) != np.nanmin(plot_data.data): ax3 = F.fig.add_subplot(gs, polar=True) FP= plot_polarspectra(xx, plot_data.angle, plot_data, lims=[xx[-1], 340 ] , verbose= False, data_type= 'fraction') FP.clevs=np.linspace(np.nanpercentile(plot_data.data, 1), np.round(plot_data.max(), 4), 21) FP.linear(ax = ax3, cbar_flag=False) - #FP.cbar.set_label('Energy Density ( (m/m)$^2$ k$^{-1}$ deg$^{-1}$ )', rotation=0, fontsize=10) - #plt.show() plt.title('\n\n'+i_lstring,y=1.0, pad=-6, color=col.green) F.save_pup(path = plot_path, name = 'B05_dir_ov_'+track_name) F.save_light(path = plot_path, name = 'B05_dir_ov_'+track_name) -# %% -#F.save_pup(path = plot_path, name = 'B05_dir_ov_'+track_name) -# MT.json_save('B05_success', plot_path + '../', {'time':time.asctime( time.localtime(time.time()) )}) diff --git a/analysis/SB04_2d_wavefield_emulator.py b/analysis/SB04_2d_wavefield_emulator.py index ccb030c3..7f2a2037 100644 --- a/analysis/SB04_2d_wavefield_emulator.py +++ b/analysis/SB04_2d_wavefield_emulator.py @@ -1,6 +1,5 @@ import os, sys -#execfile(os.environ['PYTHONSTARTUP']) """ This file open a ICEsat2 track applied filters and corections and returns smoothed photon heights on a regular grid in an .nc file. @@ -10,8 +9,6 @@ exec(open(os.environ['PYTHONSTARTUP']).read()) exec(open(STARTUP_2021_IceSAT2).read()) - #%matplotlib inline - import ICEsat2_SI_tools.convert_GPS_time as cGPS import h5py import ICEsat2_SI_tools.io as io @@ -22,12 +19,9 @@ import spicke_remover import datetime import concurrent.futures as futures - #import s3fs - # %% + track_name, batch_key, test_flag = io.init_from_input(sys.argv) # loads standard experiment - #track_name, batch_key, test_flag = '20190605061807_10380310_004_01', 'SH_batch01', False - #track_name, batch_key, test_flag = '20190601094826_09790312_004_01', 'SH_batch01', False - #track_name, batch_key, test_flag = '20190207111114_06260210_004_01', 'SH_batch02', False + track_name, batch_key, test_flag = '20190219073735_08070210_004_01', 'SH_batch02', False @@ -35,23 +29,21 @@ #print(track_name, batch_key, test_flag) hemis, batch = batch_key.split('_') - #track_name= '20190605061807_10380310_004_01' ATlevel= 'ATL03' save_path = mconfig['paths']['work'] + '/B03_spectra_'+hemis+'/' save_name = 'B03_'+track_name - #plot_path = mconfig['paths']['plot'] + '/'+hemis+'/'+batch_key+'/' + track_name + '/B_spectra/' plot_path = mconfig['paths']['plot'] + '/phase_fitting_fake/2D_fake/' MT.mkdirs_r(plot_path) MT.mkdirs_r(save_path) bad_track_path =mconfig['paths']['work'] +'bad_tracks/'+ batch_key+'/' -# %% + if __name__ == '__main__': all_beams = mconfig['beams']['all_beams'] high_beams = mconfig['beams']['high_beams'] low_beams = mconfig['beams']['low_beams'] - #Gfilt = io.load_pandas_table_dict(track_name + '_B01_regridded', load_path) # rhis is the rar photon data + load_path = mconfig['paths']['work'] +'/B01_regrid_'+hemis+'/' Gd = io.load_pandas_table_dict(track_name + '_B01_binned' , load_path) # @@ -70,30 +62,21 @@ Gspec.coords['T'] = 1/Gspec.coords['f'] Gspec=Gspec.swap_dims({'k': 'f'}) - #Gspec.spectral_power_optm.sel(beam='weighted_mean').plot() - # %% - #k_lim= 0.02 + A, B = Gspec.sel(beam= 'gt2r').Y_model_hat , Gspec.sel(beam= 'gt2l').Y_model_hat r_ave_kargs={'x':2, 'f':10, 'center':True, 'min_periods':2} 
r_ave_kargs2={'f':10, 'center':True, 'min_periods':2} - #(abs(B) - abs(A)).plot() + S_aa = (A*A.conj()).real S_bb = (B*B.conj()).real - # co_spec = (A.conj() *B) /S_aa/S_bb - # abs(co_spec).plot() + co_spec = (A.conj() *B).rolling(**r_ave_kargs).mean() np.log(abs(co_spec)).plot(levels=np.arange(-2, 3, 0.1)) (abs(co_spec)).plot(levels=np.exp(np.arange(-3, 2, 0.1))) abs(co_spec).mean('x').plot() - -#(abs(co_spec)/(S_aa *S_bb).rolling(**r_ave_kargs).mean()).plot() -# -# (abs(A.conj() *B)/(S_aa *S_bb)).rolling(**r_ave_kargs).mean()[:,:].plot() - -# %% if __name__ == '__main__': L1 = 50 k1 = 2* np.pi /L1 @@ -111,11 +94,9 @@ alpha = 35 kk, ll = np.cos(alpha * np.pi/180) * np.array([0.9*k1, k1, 1.1* k1]), np.sin(alpha * np.pi/180) * np.array([0.9* k1, 1*k1, 1.1* k1]) M_k, M_l = kk.size, ll.size - #y =np.sin(k1* x) + np.sin(k2* x) kk_mesh, ll_mesh = np.meshgrid(kk, ll) kk_mesh, ll_mesh = kk_mesh.reshape(kk_mesh.size), ll_mesh.reshape(ll_mesh.size) - G = np.cos(np.outer(XX, kk_mesh) + np.outer(YY, ll_mesh)).T# + np.sin(np.outer(XX, kk_mesh) + np.outer(YY, ll_mesh)).T - #G = np.vstack([ np.cos(np.outer(x, k) + np.outer(y, l)).T , np.sin(np.outer(x, k) + np.outer(y, l) ).T ] ).T + G = np.cos(np.outer(XX, kk_mesh) + np.outer(YY, ll_mesh)).T G.shape plt.contourf(x, y, G.sum(0).reshape(Ny, Nx) ) @@ -124,7 +105,6 @@ # %% radial coordincates def gaus_2d(x, y, pos_tuple, sigma_g ): - #grid = ( (XX - pos_tuple[0]) * (YY - pos_tuple[1]) ) gx = np.exp(-0.5 * (x - pos_tuple[0])**2 /sigma_g**2 ) gy = np.exp(-0.5 * (y - pos_tuple[1])**2 /sigma_g**2 ) return np.outer(gx , gy).T @@ -140,8 +120,6 @@ def gaus_2d(x, y, pos_tuple, sigma_g ): plt.contourf(k_range, l_range, gaus_lk ) plt.axis('equal') -# %% - k_0 = 0.03 l_0 = 0 dk = 0.01 @@ -203,7 +181,6 @@ def gaus_2d_mesh(XX,YY, pos_tuple, sigma_g ): plt.axis('equal') -# %% radial coodinates def get_stancils_polar( amp, angle_rad, size=1, dk = 0.01, mesh = True, plot_flag = True, amp_std= None, random=True): """ inputs: @@ -263,10 +240,6 @@ def get_stancils_polar( amp, angle_rad, size=1, dk = 0.01, mesh = True, plot_fl if __name__ == '__main__': font_for_pres() - #k_mesh.shape - #for angle in np.arange(-80, 80+20, 40): - #for phase in np.arange(0, 2*np.pi, np.pi/3): - #for k_abs in np.arange(0.01, 0.09, 0.01): angle =45 amp =1 @@ -276,24 +249,16 @@ def get_stancils_polar( amp, angle_rad, size=1, dk = 0.01, mesh = True, plot_fl for dk in np.arange(0.005, 0.03, 0.002): for size in [2, 5, 10, 30, 50, 100, 200]: - #for angle in np.arange(-80, 80+20, 40): - #for k_abs in np.arange(0.01, 0.09, 0.01): - #for phase in np.arange(0, 2*np.pi, np.pi/3): - - F = M.figure_axis_xy(8, 3, view_scale = 0.5, container =False) plt.suptitle('k_abs=' + str(k_abs) +' angle=' + str(angle) + ' size=' + str(size) +' dk=' + str(dk) ) ax = plt.subplot(1, 2, 1) k_list, l_list, amp_weights, stancil_shape = get_stancils_polar(k_abs, angle * np.pi/180, size=size, dk = dk, mesh = True , plot_flag= True, random = True) - circle1 = plt.Circle((k_list[0], l_list[0]), dk, color='b', fill=False) ax.add_patch(circle1) - k_noise, l_noise, amp_noise, stancil_shape = get_stancils_polar(0.8, 0 * np.pi/180, size=20, dk = 0.3, mesh = True , plot_flag= True, random = True) amp_noise = (amp_noise *0+1) * 0 - plt.xlim(0, 0.1) #plt.axis('equal') plt.ylim(-0.1, 0.1) @@ -308,10 +273,6 @@ def get_stancils_polar( amp, angle_rad, size=1, dk = 0.01, mesh = True, plot_fl amp_all = np.concatenate([amp_weights, amp_noise]) amp_all.shape G = np.vstack([ np.cos(np.outer(XX, k_all) + np.outer(YY, l_all)).T , np.sin(np.outer(XX, 
k_all) + np.outer(YY, l_all)).T ] ).T - #G = np.vstack([ np.cos(np.outer(x, k) + np.outer(y, l)).T , np.sin(np.outer(x, k) + np.outer(y, l) ).T ] ).T - - #phase1 = np.random.rand(1, amp_list.size) * np.pi*2 - #phase = np.arange(0, amp_list.size) * np.pi/2 b = np.hstack([ np.cos(phase)*amp_all, np.sin(phase) *amp_all]).squeeze() * amp z_model = (G @ b).reshape(Ny, Nx) @@ -323,9 +284,3 @@ def get_stancils_polar( amp, angle_rad, size=1, dk = 0.01, mesh = True, plot_fl F.save_light(path = plot_path, name = 'fake_2d_dk' +str(dk) +'_s' + str(int(size))) plt.show() - - # class twoD_wave_packets(object): - # def __init__(self): - - - # %% diff --git a/analysis_db/B05_define_angle.py b/analysis_db/B05_define_angle.py index 7cf9f5ff..05aa1c37 100644 --- a/analysis_db/B05_define_angle.py +++ b/analysis_db/B05_define_angle.py @@ -280,7 +280,7 @@ def weighted_means(data, weights, x_angle, color='k'): exit() # %% -class plot_polarspectra(object): +class plot_polarspectra: def __init__(self,k, thetas, data, data_type='fraction' ,lims=None, verbose=False): """ diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/angle_optimizer.py b/src/icesat2_tracks/ICEsat2_SI_tools/angle_optimizer.py index ccb858fe..fa167fcc 100644 --- a/src/icesat2_tracks/ICEsat2_SI_tools/angle_optimizer.py +++ b/src/icesat2_tracks/ICEsat2_SI_tools/angle_optimizer.py @@ -122,7 +122,7 @@ def simple_log_panelty(x, x0, sigma): return - 0.5 * (cost_sqrt/tot_var + np.log(tot_var) ).sum() + prior_weight * penalties -class sample_with_mcmc(object): +class sample_with_mcmc: """ sample a 2nd surface using mcmc and other methods. its made for getting a quick estimate! diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py index 2ca2ca4d..72eda331 100644 --- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py +++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py @@ -156,7 +156,7 @@ def define_weights(stancil, prior, x, y, dx, k, max_nfev, plot_flag=False): return weight, prior_pars -class wavenumber_spectrogram_gFT(object): +class wavenumber_spectrogram_gFT: def __init__(self, x, data, L, dx, wavenumber, data_error=None, ov=None): """ returns a wavenumber spectrogram with the resolution L-ov @@ -720,7 +720,7 @@ def power_from_model(p_hat, dk, M, N_x, N_x_full): return spec -class generalized_Fourier(object): +class generalized_Fourier: def __init__(self, x, ydata, k): """ non_dimensionalize (bool, default=True) if True, then the data and R_data_uncertainty is non-dimensionalized by the std of the data diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/io.py b/src/icesat2_tracks/ICEsat2_SI_tools/io.py index 332ce7f6..521e305c 100644 --- a/src/icesat2_tracks/ICEsat2_SI_tools/io.py +++ b/src/icesat2_tracks/ICEsat2_SI_tools/io.py @@ -70,13 +70,10 @@ def ID_to_str(ID_name): date return IDs[0] +' ' +date +' granule: ' + IDs[2] -class case_ID(object): +class case_ID: """docstring for case_ID""" def __init__(self, track_name): import re - super(case_ID, self).__init__() - - #track_name_pattern = r'(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})_(\d{4})(\d{2})(\d{2})_(\d{3})_(\d{2})' track_name_pattern = r'(\D{2}|\d{2})_?(\d{4})(\d{2})(\d{2})(\d{2})?(\d{2})?(\d{2})?_(\d{4})(\d{2})(\d{2})_?(\d{3})?_?(\d{2})?' 
case_ID_pattern = r'(\d{4})(\d{2})(\d{2})_(\d{4})(\d{2})(\d{2})' diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/spectral_estimates.py b/src/icesat2_tracks/ICEsat2_SI_tools/spectral_estimates.py index 52960e44..d46d9ea2 100644 --- a/src/icesat2_tracks/ICEsat2_SI_tools/spectral_estimates.py +++ b/src/icesat2_tracks/ICEsat2_SI_tools/spectral_estimates.py @@ -376,7 +376,7 @@ def get_lon_lat_coords(stancil): return coord_positions -class wavenumber_spectrogram(object): +class wavenumber_spectrogram: def __init__(self, x_grid, data, Lpoints, ov=None, window=None): """ returns a wavenumber spectrogram with the resolution L-ov @@ -512,7 +512,7 @@ def get_stancil_var_apply(stancil): self.G.attrs['mean_variance_detrended_chunks'] = np.array(stancil_vars).mean() self.G.attrs['mean_variance_pwelch_spectrum'] = self.calc_var() -class wavenumber_spectrogram_LS_even(object): +class wavenumber_spectrogram_LS_even: def __init__(self, x, data, L, waven_method = 'fftX2' , dy=None , ov=None, window=None, kjumps=1): """ returns a wavenumber spectrogram with the resolution L-ov @@ -618,7 +618,7 @@ def parceval(self, add_attrs=True ): def mean_spectral_error(self, confidence = 0.95 ): return wavenumber_spectrogram.mean_spectral_error(self, confidence= confidence ) -class wavenumber_spectrogram_LS(object): +class wavenumber_spectrogram_LS: def __init__(self, x, data, L, dx, dy = None, waven_method = 'fftX2', ov=None, window=None): """ returns a wavenumber spectrogram with the resolution L-ov @@ -925,7 +925,7 @@ def mean_spectral_error(self, mask=None, confidence = 0.95 ): # class for getting standard Pwelch spectrum. old version, deprechiate -class wavenumber_pwelch(object): +class wavenumber_pwelch: def __init__(self,data, x, L, ov=None, window=None, save_chunks=False, plot_chunks=False): """ returns a wavenumber spectrum using the pwelch method @@ -1098,7 +1098,7 @@ def calc_var(self): # %% optimze spectral variance -class conserve_variance(object): +class conserve_variance: def __init__(self,Z, freq, data, nan_mask= None): """ diff --git a/src/icesat2_tracks/analysis_fake_data/X03_MCMC_surface_smapling.py b/src/icesat2_tracks/analysis_fake_data/X03_MCMC_surface_smapling.py index 312c157d..4e3f65a9 100644 --- a/src/icesat2_tracks/analysis_fake_data/X03_MCMC_surface_smapling.py +++ b/src/icesat2_tracks/analysis_fake_data/X03_MCMC_surface_smapling.py @@ -41,7 +41,7 @@ def cost(x, y): z = 4+ np.sin(4* 2 * np.pi *x/Lx) + np.sin( 3 * np.pi *x/Lx - np.pi/5) + np.cos(1* 2 * np.pi *y/Ly) + np.sin( 3 * np.pi *y/Ly - np.pi/3) return z**2 -class sample_with_mcmc(object): +class sample_with_mcmc: """ sample a 2nd surface using mcmc. its make for getting a quick estimate! 
""" diff --git a/src/icesat2_tracks/local_modules/m_colormanager_ph3.py b/src/icesat2_tracks/local_modules/m_colormanager_ph3.py index 074e07a7..17163538 100644 --- a/src/icesat2_tracks/local_modules/m_colormanager_ph3.py +++ b/src/icesat2_tracks/local_modules/m_colormanager_ph3.py @@ -50,7 +50,7 @@ def json_load(name, path, verbose=False): -class color(object): +class color: def __init__(self, path=None, name=None): self.white=(1,1,1) if (path is not None) & (name is not None): diff --git a/src/icesat2_tracks/local_modules/m_general_ph3.py b/src/icesat2_tracks/local_modules/m_general_ph3.py index e268658b..6e24a8d9 100644 --- a/src/icesat2_tracks/local_modules/m_general_ph3.py +++ b/src/icesat2_tracks/local_modules/m_general_ph3.py @@ -27,7 +27,7 @@ import matplotlib.pyplot as plt -class color(object): +class color: def __init__(self): self.red=(203/255, 32/255, 39/255) @@ -61,7 +61,7 @@ def show(self): # funny massage -class figure_axis_xy(object): +class figure_axis_xy: """define standart XY Plot with reduced grafics""" def __init__(self,x_size=None,y_size=None,view_scale=None, size_tuple=None , fig_scale=None, container=False, dpi=180): @@ -171,7 +171,7 @@ class subplot_routines(figure_axis_xy): def __init__(self, ax): self.ax=ax -class plot_sprecta(object): +class plot_sprecta: def __init__(self,fs, Xdata,sample_unit=None,data_unit=None): self.fs=fs @@ -261,7 +261,7 @@ def power(self, Color='b', fig_scale=2, fax='f'): self.F.make_clear() plt.grid() -class plot_periodogram(object): +class plot_periodogram: def __init__(self,time,fs, data,clevs=None, sample_unit=None, data_unit=None, ylim=None, time_unit=None, cmap=None): self.fs=fs[1:] @@ -696,7 +696,7 @@ def set_xaxis_to_days(self, **kwargs): set_timeaxis_days(self.ax, **kwargs) -class plot_polarspectra(object): +class plot_polarspectra: def __init__(self,f, thetas, data,unit=None, data_type='fraction' ,lims=None, verbose=False): self.f=f @@ -1256,7 +1256,7 @@ def spickes_to_mean(ts, nloop=None, spreed=1, gaussian=True): ## Composites -class composite_data(object): +class composite_data: def __init__(self,var, index_weight=None): #print(var.shape) self.composites=var @@ -1285,7 +1285,7 @@ def bootstrap(self, ci=[2.5, 50, 97.5], reps=1000,): yb = 1/np.arange(1, n+1)[:, None] * np.cumsum(xb, axis=0) upper, lower = np.percentile(yb, [2.5, 97.5], axis=1) -class comp_iter(object): +class comp_iter: def __init__(self, span, dt=None, unit=None): self.span=list(span) for s in self.span: @@ -1306,7 +1306,7 @@ def __init__(self, span, dt=None, unit=None): self.time_iter_string=time_str -class composite(object): +class composite: def __init__(self,index, time=None, weigthing=False, span=None): """ Initial Class for bulding composite based on: index position in the time vector 'time' diff --git a/src/icesat2_tracks/local_modules/m_spectrum_ph3.py b/src/icesat2_tracks/local_modules/m_spectrum_ph3.py index 19b49700..f904b63c 100644 --- a/src/icesat2_tracks/local_modules/m_spectrum_ph3.py +++ b/src/icesat2_tracks/local_modules/m_spectrum_ph3.py @@ -91,7 +91,7 @@ def spicke_remover(data, nstd=20.0, spreed=500.0, max_loops=10.0 , verbose=False return data2 , act_flag -class Spectrum(object): +class Spectrum: """ A class that represents a single realization of the one-dimensional spectrum of a given field phi """ @@ -149,7 +149,7 @@ def parceval(self): -class moments(object): +class moments: def __init__(self,data_org,dt, L=None, ov=None,window=None, save_chunks=False, plot_chunks=False, prewhite=None): """ This function calculates the spectral moments 
from a station (buoy, GPS, or seismic station) that measures
@@ -396,7 +396,7 @@ def cal_MEM(self, theta=None, flim=(0.01, .5)):



-class pwelch(object):
+class pwelch:
     def __init__(self,data,dt,L=None, ov=None,window=None, save_chunks=False, plot_chunks=False, periodogram=False, prewhite=None):
         """
         prewhite None(default)
@@ -800,7 +800,7 @@ def save_data(self, path=None, S=None):
         save_file(P, path)


-class save_data_periodogram(object):
+class save_data_periodogram:
     def __init__(self,P, S=None):
         self.meta=S.meta if S is not None else ''
         self.data_unit=S.unit if S is not None else ''

From 417d0ac3e3852a2ae29bf32428c587d1ef4315f7 Mon Sep 17 00:00:00 2001
From: Camilo Diaz
Date: Mon, 15 Jan 2024 06:17:32 -0500
Subject: [PATCH 11/12] removing old class definition

---
 src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
index 2ca2ca4d..72eda331 100644
--- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
+++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
@@ -156,7 +156,7 @@ def define_weights(stancil, prior, x, y, dx, k, max_nfev, plot_flag=False):
     return weight, prior_pars


-class wavenumber_spectrogram_gFT(object):
+class wavenumber_spectrogram_gFT:
     def __init__(self, x, data, L, dx, wavenumber, data_error=None, ov=None):
         """
         returns a wavenumber spectrogram with the resolution L-ov
@@ -720,7 +720,7 @@ def power_from_model(p_hat, dk, M, N_x, N_x_full):
     return spec


-class generalized_Fourier(object):
+class generalized_Fourier:
     def __init__(self, x, ydata, k):
         """
         non_dimensionalize (bool, default=True) if True, then the data and R_data_uncertainty is non-dimensionalized by the std of the data

From 06d2e26bab3529b8f02ce096c3c22f6b4afcae13 Mon Sep 17 00:00:00 2001
From: Camilo Diaz
Date: Mon, 15 Jan 2024 09:17:55 -0500
Subject: [PATCH 12/12] refactoring rebin function

---
 src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
index 72eda331..32280284 100644
--- a/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
+++ b/src/icesat2_tracks/ICEsat2_SI_tools/generalized_FT.py
@@ -4,19 +4,17 @@
 import icesat2_tracks.ICEsat2_SI_tools.lanczos as lanczos


-def rebin(data, dk, return_edges=False):
+def rebin(data, dk):
     """
     rebin data to a new k-grid with dk
     """
-    k_low_limits = data.k[::10]
+    k_low_limits = data.k[::dk]
     Gmean = data.groupby_bins("k", k_low_limits).mean()
     k_low = (k_low_limits + k_low_limits.diff("k")[0] / 2).data
     Gmean["k_bins"] = k_low[0:-1]
     Gmean = Gmean.rename({"k_bins": "k"})
-    if return_edges:
-        return Gmean, k_low_limits
-    else:
-        return Gmean
+    return Gmean, k_low_limits
+

 # define weight function
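Note on the rebin refactor in PATCH 12/12: dk now acts as an integer subsampling stride on the existing k coordinate (every dk-th wavenumber becomes a bin edge), not a wavenumber spacing, and the bin edges are always returned alongside the binned mean. A minimal usage sketch, assuming the refactored module is importable as shown in the diff and the input is an xarray DataArray with a monotonic "k" coordinate; the array values below are purely illustrative:

import numpy as np
import xarray as xr

from icesat2_tracks.ICEsat2_SI_tools.generalized_FT import rebin

# Illustrative spectrum on a fine wavenumber grid (made-up values).
data = xr.DataArray(
    np.random.rand(100),
    dims="k",
    coords={"k": np.linspace(0.005, 0.2, 100)},
)

# dk=10 reproduces the previously hardcoded data.k[::10] subsampling;
# wavenumbers beyond the last bin edge fall outside all bins and are dropped.
Gmean, k_edges = rebin(data, dk=10)

print(Gmean.k.values)   # bin centers (edges shifted by half a bin width)
print(k_edges.values)   # the coarse-grid bin edges, now always returned

Returning the edges unconditionally keeps every call site uniform: callers that previously passed return_edges=False can simply ignore the second value.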