diff --git a/codecov.yml b/codecov.yml index bb6575c0..7e866a9d 100644 --- a/codecov.yml +++ b/codecov.yml @@ -8,7 +8,7 @@ coverage: project: default: target: auto - threshold: 0 + threshold: 1 set_pending: yes if_no_uploads: error if_not_found: error diff --git a/src/noisepy/monitoring/attenuation_utils.py b/src/noisepy/monitoring/attenuation_utils.py index 789ffd56..5c72a1f9 100644 --- a/src/noisepy/monitoring/attenuation_utils.py +++ b/src/noisepy/monitoring/attenuation_utils.py @@ -1,7 +1,9 @@ import logging import math +import os from typing import Tuple +import matplotlib.pyplot as plt import numpy as np from noisepy.monitoring.monitoring_utils import ConfigParameters_monitoring @@ -136,20 +138,203 @@ def check_s0(x: float) -> None: ### ----- -def get_SSR(fnum: int, para) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: +def window_determine(tbeg: float, tend: float, nwindows=6) -> Tuple[np.ndarray]: + """ + # Determine the window begin and end time + ---------------------------------------------- + Parameters: + tbeg: The begin time of the window + tend: The end time of the window + dt: The sampling interval + ---------------------------------------------- + Return: + twinbe: The window begin and end time array + ---------------------------------------------- + """ + + twinbe = np.ndarray((nwindows, 2)) + coda_length = tend - tbeg + + for i in range(3): + twinbe[i][0] = tbeg + i * coda_length / 10 + twinbe[i][1] = tbeg + (i + 1) * coda_length / 10 + + twinbe[i + 3][0] = tbeg + (i + 5) * coda_length / 10 + twinbe[i + 3][1] = tbeg + (i + 5 + 1) * coda_length / 10 + + return twinbe + + +### ----- +def get_energy_density( + fmsv_single: np.ndarray, dt: float, cvel: float, r: float, npts: int, mean_free_path: float, intrinsic_b: float +) -> Tuple[np.ndarray, np.ndarray]: + """ + # Calculate the energy density function + ---------------------------------------------- + Parameters: + fmsv_single: The mean-squared value + dt: The sampling interval + cvel: The Rayleigh wave velocity + r: The distance between the source and receiver + npts: The total sample number + mean_free_path: The mean free path + intrinsic_b: The intrinsic absorption parameter b + ---------------------------------------------- + Return: + Esyn_temp: The synthetic energy density function + Eobs_temp: The observed energy density function + ---------------------------------------------- + """ + + monito_config = ConfigParameters_monitoring() + ONESTATION = monito_config.single_station + + # initialize the temporary arrays + Esyn_temp = np.zeros((npts // 2 + 1)) + Eobs_temp = np.zeros((npts // 2 + 1)) + + Eobs_temp = fmsv_single + # calculate the Esyn and SSR for combination of mean_free_path and intrinsic_b + for twn in range(npts // 2): + tm = dt * twn + s0 = cvel**2 * tm**2 - r**2 + if s0 <= 0: + # logger.warning(f"s0 {s0} <0") + continue + + if ONESTATION: + tmp = ESYN_RadiaTrans_onesta(mean_free_path, tm, r, cvel) + else: + tmp = ESYN_RadiaTrans_intersta(mean_free_path, tm, r, cvel) + + Esyn_temp[twn] = tmp * math.exp(-1 * intrinsic_b * tm) + + return Esyn_temp, Eobs_temp + + +# --- +def scaling_Esyn(Esyn_temp: np.ndarray, Eobs_temp: np.ndarray, twindow: np.ndarray, dt: float) -> np.ndarray: + """ + # Scaling the Esyn in the specific window + ---------------------------------------------- + Parameters: + Esyn_temp: The synthetic energy density function + Eobs_temp: The observed energy density function + twindow: The window begin and end time array + dt: The sampling interval + 
---------------------------------------------- + Return: + Esyn_temp: The scaled synthetic energy density function + ---------------------------------------------- + """ + + # find the scaling factor in the specific window + SSR_temppp = np.zeros((len(twindow))) + for tsn in range(len(twindow)): + tsb = int(twindow[tsn] // dt) + SSR_temppp[tsn] = math.log10(Eobs_temp[tsb]) - math.log10(Esyn_temp[tsb]) + crap = np.mean(SSR_temppp) + Esyn_temp *= 10**crap # scale the Esyn + # using scalar factor for further fitting processes --> shape matters more than amplitude + + return Esyn_temp, 10**crap + + +# ### ----- +def windowing_SSR( + coda_window: np.array, + mean_free_path: float, + intrinsic_b: float, + fmsv: np.ndarray, + dist: np.ndarray, + npts: int, + dt: float, + cvel: float, + ncoda: int, + PLOT_CHECK: bool = False, +) -> float: + """ + # Calculate the sum of squared residuals (SSR) in the specific window + ---------------------------------------------- + Parameters: + Eobs: The observed energy density function + Esyn: The synthetic energy density function + coda_window: The window begin and end time array + mean_free_path: The mean free path + intrinsic_b: The intrinsic absorption parameter b + npts: The total sample number + ---------------------------------------------- + Return: + window_SSR: The sum of squared residuals in the specific window + """ + Esyn_temp = np.zeros((npts // 2)) + Eobs_temp = np.zeros((npts // 2)) + + npair = fmsv.shape[0] + window_SSR = 0.0 + if PLOT_CHECK: + fig, ax = plt.subplots(npair, figsize=(6, 40)) + + for aa in range(npair): + twindow = np.arange((coda_window[aa, 0]), (coda_window[aa, 1]), dt) + r = float(dist[aa]) + + # Get the energy density + Esyn_temp, Eobs_temp = get_energy_density(fmsv[aa], dt, cvel, r, npts, mean_free_path, intrinsic_b) + + # Scale the Esyn + scaled_Esyn, scaling_amplitude = scaling_Esyn(Esyn_temp, Eobs_temp, twindow, dt) + # logger.info(f"pair {aa}, (mfp, intb)=({mean_free_path:.1f}, {intrinsic_b:.2f}) \ + # -- scaling amp: {scaling_amplitude:.2f}") + + if PLOT_CHECK: + ax[aa].plot(Eobs_temp, color="black", ls="-", label="Eobs") + ax[aa].plot(scaled_Esyn, color="blue", ls="-", label="Esyn") + ax[aa].plot([twindow[0] // dt, twindow[0] // dt], [0, 1], color="red", ls="--", label="window") + ax[aa].plot([twindow[-1] // dt, twindow[-1] // dt], [0, 1], color="red", ls="--") + ax[aa].set_title( + f"pair {aa} on (mfp, intb)=({mean_free_path:.1f}, {intrinsic_b:.2f}) \ + -- scaling amp: {scaling_amplitude:.2f}" + ) + ax[aa].set_yscale("log") + ax[aa].set_xlabel("Npts") + ax[aa].set_xlim(0, npts // 8) + ax[aa].set_ylim(10**-6, 10) + + # Calculate the SSR in the rescale Esyn and Eobs + tsb = int(twindow[0] // dt) + tse = int((twindow[-1]) // dt) + 1 + SSR_temp = 0.0 + for twn in range(tsb, tse): + SSR_temp = SSR_temp + ((Eobs_temp[twn]) - (scaled_Esyn[twn])) ** 2 + window_SSR += SSR_temp + + if PLOT_CHECK: + os.system("mkdir figure_checking") + ax[0].legend(loc="upper right") + plt.tight_layout() + fname = f"figure_checking/Scaled_density_ncoda{ncoda}_mfp{mean_free_path:.1f}_intb{intrinsic_b:.2f}.png" + plt.savefig(fname) + plt.close(fig) + + return window_SSR + + +### ----- +def get_SSR_codawindows(para) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ # Calculate the sum of squared residuals (SSR) between Eobs and Esyn # for different combination of mean free path and intrinsic absorption parameter b ---------------------------------------------- Parameters: - fb: the number of used frequency band dt: data sampling interval - c: 
Rayleigh wave velocity + cvel: Rayleigh wave velocity npts: Total sample number vdist: Inter-station distance mfpx: The mean free path array intby: The intrinsic absorption parameter b array - twinbe: Window begin time and end time array + twin_select: Window begin time and end time array fmsv_mean: Observed mean-squared value ---------------------------------------------- Return: @@ -161,164 +346,122 @@ def get_SSR(fnum: int, para) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: # default config parameters which can be customized monito_config = ConfigParameters_monitoring() - fb = para["fb"] dt = para["dt"] - c = para["cvel"] + cvel = para["cvel"] vdist = para["vdist"] + npts = para["npts"] mfpx = para["mfp"] intby = para["intb"] - twinbe = para["twin"] - npts = para["npts"] + twin_select = para["twin"] fmsv_mean = para["fmsv"] intb_interval_base = monito_config.intb_interval_base mfp_interval_base = monito_config.mfp_interval_base + nwindows = twin_select.shape[1] + + SSR_final = np.zeros((len(mfpx), len(intby))) + # grid search in combination of mean_free_path and intrinsic_b + for nfree in range(len(mfpx)): + mean_free = 0.2 + mfp_interval_base * nfree + mfpx[nfree] = mean_free + for nb in range(len(intby)): + intrinsic_b = intb_interval_base * (nb + 1) + intby[nb] = intrinsic_b + + SSR_temp = 0.0 + #### specific window for all pairs + for ncoda in range(nwindows): + twindow = twin_select[:, ncoda, :] + single_window_SSR = windowing_SSR( + twindow, mean_free, intrinsic_b, fmsv_mean, vdist, npts, dt, cvel, ncoda + ) + + SSR_temp = SSR_temp + single_window_SSR + # logger.info(f"window {ncoda}, (mfp, intb)=({mean_free:.1f}, {intrinsic_b:.2f}) \ + # -- single SSR: {single_window_SSR:.2f}, final SSR: {SSR_temp:.2f}") + SSR_final[nfree][nb] = SSR_temp - Esyn_temp = np.ndarray((len(mfpx), len(intby), npts // 2 + 1)) - Eobs_temp = np.ndarray((len(mfpx), len(intby), npts // 2 + 1)) - SSR_final = np.ndarray((len(mfpx), len(intby))) - SSR_final[:][:] = 0.0 - for aa in range(fnum): - r = float(vdist[aa]) - if r <= 10 ** (-6): - r = 0.000001 # To avoid zero value at denominator - - twindow = [] - twindow = range(int(twinbe[aa][fb][0]), int(twinbe[aa][fb][1]), 1) - SSR_temppp = np.ndarray((len(mfpx), len(intby), len(twindow))) - - # grid search in combination of mean_free_path and intrinsic_b - Esyn_temp[:][:][:] = 0.0 - Eobs_temp[:][:][:] = 0.0 - - for nfree in range(len(mfpx)): - mean_free = 0.2 + mfp_interval_base * nfree - mfpx[nfree] = mean_free - for nb in range(len(intby)): - intrinsic_b = intb_interval_base * (nb + 1) - intby[nb] = intrinsic_b - - # calculate the Esyn and SSR for combination of mean_free_path and intrinsic_b - for twn in range(npts // 2 + 1): - tm = dt * twn - Eobs_temp[nfree][nb][twn] = fmsv_mean[aa][fb + 1][twn] - - s0 = c**2 * tm**2 - r**2 - if s0 < 0: - # logger.warning(f"s0 {s0} <0") - continue - - tmp = ESYN_RadiaTrans_onesta(mean_free, tm, r, c) - Esyn_temp[nfree][nb][twn] = tmp * math.exp(-1 * intrinsic_b * tm) - # using scalar factor for further fitting processes --> shape matters more than amplitude - - #### specific window --> find the scaling factor in the specific window - for tsn in range(len(twindow)): - tsb = int(twindow[tsn] // dt) - SSR_temppp[nfree][nb][tsn] = 0.0 - SSR_temppp[nfree][nb][tsn] = math.log10(Eobs_temp[nfree][nb][tsb]) - math.log10( - Esyn_temp[nfree][nb][tsb] - ) - - crap = np.mean(SSR_temppp[nfree][nb]) - Esyn_temp[nfree][nb] *= 10**crap # scale the Esyn - - #### specific window - #### Calculate the SSR in the specific window - for tsn in 
range(len(twindow)): - tsb = int(twindow[tsn] // dt) - tse = int((twindow[tsn] + 1) // dt) - SSR_temp = 0.0 - for twn in range(tsb, tse): - SSR_temp += (math.log10(Eobs_temp[nfree][nb][twn]) - math.log10(Esyn_temp[nfree][nb][twn])) ** 2 - SSR_final[nfree][nb] += SSR_temp - # --- time comsuming for plotting out individual fitting curves - # plot_fitting_curves(mean_free,y,fmsv_mean[aa][0][:],Eobs_temp[nfree],Esyn_temp[nfree],fname[aa],vdist[aa],twindow) # logger.info(f"mean_free: {mean_free}, intrinsic_b {intrinsic_b}, SSR:{SSR_temp}") - SSR_final = SSR_final / (np.min(SSR_final[:][:])) + SSR_final = SSR_final / (np.min(SSR_final[:][:])) return SSR_final, mfpx, intby -def get_optimal_Esyn(fnum: int, para) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: +def get_optimal_Esyn(para) -> Tuple[float, float, np.ndarray, np.ndarray, np.ndarray]: """ # Getting the optimal value from the grid searching results (the SSR output from the get_SSR) # Return with the optimal value of mean free path, intrinsic absorption parameter # and the optimal fit of synthetic energy density function ---------------------------------------------- Parameters: - fb: the number of used frequency band dt: data sampling interval - c: Rayleigh wave velocity + cvel: Rayleigh wave velocity npts: Total sample number vdist: Inter-station distance mfpx: The mean free path array intby: The intrinsic absorption parameter b array - twinbe: Window begin time and end time array - fmsv_mean: Observed mean-squared value - fmin: Lower bound of measured frequency band - fmax: Upper bound of measured frequency band + twin_select: Window begin time and end time array + fmsv: Observed mean-squared value SSR: The sum of squared residuals over combination of mean free path and intrinsic absorption parameter b sta_pair: Station pair - aa: The file number of measurment (here in this test is 0) ---------------------------------------------- Return: result_intb: The optimal value of intrinsic absorption parameter b result_mfp: The optimal value of mean free path Eobs: The observed energy density function Esyn: The optimal fit of synthetic energy density function + scaling_amplitude: The scaling amplitude on synthetic energy density function ---------------------------------------------- """ - fb = para["fb"] + dt = para["dt"] - c = para["cvel"] + cvel = para["cvel"] vdist = para["vdist"] mfpx = para["mfp"] intby = para["intb"] - twinbe = para["twin"] + twin_select = para["twin"] npts = para["npts"] - fmsv_mean = para["fmsv"] + fmsv = para["fmsv"] - fmin = para["fmin"] - fmax = para["fmax"] SSR = para["SSR"] sta_pair = para["sta"] - aa = para["filenum"] - r = float(vdist[aa]) - if r <= 10 ** (-6): - r = 0.000001 # To avoid zero value at denominator - - loc = np.where(SSR[fb].T == np.amin(SSR[fb].T)) - ymin = intby[loc[0]] - xmin = mfpx[loc[1]] - logger.info(f" Station Pair: {sta_pair}, frequency band {fmin}-{fmax}Hz, intrinsic_b {ymin}, mean_free: {xmin}") - result_intb = np.take(ymin, 0) - result_mfp = np.take(xmin, 0) - - twindow = [] - twindow = range(int(twinbe[aa][fb][0]), int(twinbe[aa][fb][1]), 1) - - Eobs = np.ndarray((npts // 2 + 1)) - Esyn = np.ndarray((npts // 2 + 1)) - temppp = np.ndarray((len(twindow))) - for twn in range(npts // 2 + 1): - tm = dt * twn - s0 = c**2 * tm**2 - r**2 - if s0 <= 0: - continue - Eobs[twn] = fmsv_mean[aa][fb + 1][twn] - tmp = ESYN_RadiaTrans_onesta(result_mfp, tm, r, c) - Esyn[twn] = tmp * math.exp(-1 * result_intb * tm) - - for tsn in range(len(twindow)): - tsb = int(twindow[tsn] // dt) - 
temppp[tsn] = 0.0 - temppp[tsn] = math.log10(Eobs[tsb]) - math.log10(Esyn[tsb]) - - crap = np.mean(temppp) - Esyn *= 10**crap - return result_intb, result_mfp, Eobs, Esyn + nwindows = twin_select.shape[1] + npair = fmsv.shape[0] + + loc = np.where(SSR.T == np.amin(SSR.T)) + if len(loc[0]) > 1: + loc = (loc[0][0], loc[1][0]) + + intrinsic_b = intby[loc[0]] + mean_free_path = mfpx[loc[1]] + logger.info(f"Station Pair: {sta_pair}, intrinsic_b {intrinsic_b}, mean_free: {mean_free_path}") + + result_intb = np.take(intrinsic_b, 0) + result_mfp = np.take(mean_free_path, 0) + + Eobs = np.zeros((npair, nwindows, npts // 2 + 1)) + Esyn = np.zeros((npair, nwindows, npts // 2 + 1)) + scaling_amplitude = np.zeros((npair, nwindows)) + + for ncoda in range(nwindows): + for aa in range(npair): + twindow = twin_select[aa, ncoda, :] + # Get the energy density + Esyn[aa, ncoda], Eobs[aa, ncoda] = get_energy_density( + fmsv[aa], dt, cvel, vdist, npts, result_mfp, result_intb + ) + + # Scale the Esyn + scaled_Esyn, scaling_temp = scaling_Esyn(Esyn[aa, ncoda], Eobs[aa, ncoda], twindow, dt) + logger.info( + f"nwindow {ncoda}, pair {aa}, (mfp, intb)=({result_mfp:.1f}," + f"{result_intb:.2f}) -- scaling amp: {scaling_temp:.2f}" + ) + Esyn[aa, ncoda] = scaled_Esyn + scaling_amplitude[aa, ncoda] = scaling_temp + + return result_intb, result_mfp, Eobs, Esyn, scaling_amplitude # ----- diff --git a/src/noisepy/monitoring/monitoring_utils.py b/src/noisepy/monitoring/monitoring_utils.py index 34587f12..68337616 100644 --- a/src/noisepy/monitoring/monitoring_utils.py +++ b/src/noisepy/monitoring/monitoring_utils.py @@ -50,6 +50,7 @@ class ConfigParameters_monitoring(BaseModel): ratio: float = Field(default=3.0, description="ratio for determining noise level by Mean absolute deviation (MAD)") # --- paramters for measuring attenuation + single_station: bool = Field(default=True, description="make measurement on single statoin or multiple stations") smooth_winlen: float = Field( default=5.0, description="smoothing window length of the envelope waveforms for measuring attenuation" ) diff --git a/src/noisepy/seis/correlate.py b/src/noisepy/seis/correlate.py index 2c8e99f9..5406a313 100644 --- a/src/noisepy/seis/correlate.py +++ b/src/noisepy/seis/correlate.py @@ -355,10 +355,15 @@ def cross_correlation( if fft_params.cc_method == CCMethod.DECONV: # -----------get the smoothed source spectrum for decon later---------- sfft1 = noise_module.smooth_source_spect(fft_params, src_fft.fft) - sfft1 = sfft1.reshape(src_fft.window_count, src_fft.length // 2) + sfft1 = sfft1.reshape( + src_fft.window_count, src_fft.length // 2 + ) # conjugate already included in smooth_source_spect else: sfft1 = np.conj(src_fft.fft).reshape(src_fft.window_count, src_fft.length // 2) + # note here that sfft1 and ffts have gone through noise_processing already: + # if FreqNorm is not None, then they have been whitened already. 
+ result = cross_corr(fft_params, src_chan, rec_chan, sfft1, sou_ind, ffts[iiR], Nfft) return result diff --git a/src/noisepy/seis/noise_module.py b/src/noisepy/seis/noise_module.py index 63f93f73..eefd9b02 100644 --- a/src/noisepy/seis/noise_module.py +++ b/src/noisepy/seis/noise_module.py @@ -404,11 +404,11 @@ def correlate(fft1_smoothed_abs, fft2, D, Nfft, dataS_t): # ----load paramters---- dt = D["dt"] maxlag = D["maxlag"] - method = D["cc_method"] + # method = D["cc_method"] cc_len = D["cc_len"] substack = D["substack"] substack_len = D["substack_len"] - smoothspect_N = D["smoothspect_N"] + # smoothspect_N = D["smoothspect_N"] nwin = fft1_smoothed_abs.shape[0] Nfft2 = fft1_smoothed_abs.shape[1] @@ -421,16 +421,24 @@ def correlate(fft1_smoothed_abs, fft2, D, Nfft, dataS_t): fft2.size, ) - if method == "coherency": - temp = moving_ave( - np.abs( - fft2.reshape( - fft2.size, - ) - ), - smoothspect_N, - ) - corr /= temp + # check if we are in the case of autocorrelation + if np.all(np.abs(fft1_smoothed_abs) == np.abs(fft2)): + x_corr = False + else: + x_corr = True + + # Marine removes this because if users have FreqNorm as RMA or 1bit or smoothspect_N==1, + # then the fft2 will already be smoothed. + # if method == "coherency": + # temp = moving_ave( + # np.abs( + # fft2.reshape( + # fft2.size, + # ) + # ), + # smoothspect_N, + # ) + # corr /= temp corr = corr.reshape(nwin, Nfft2) if substack: @@ -446,7 +454,8 @@ def correlate(fft1_smoothed_abs, fft2, D, Nfft, dataS_t): crap[:Nfft2] = corr[i, :] crap[:Nfft2] = crap[:Nfft2] - np.mean(crap[:Nfft2]) # remove the mean in freq domain (spike at t=0) crap[-(Nfft2) + 1 :] = np.flip(np.conj(crap[1:(Nfft2)]), axis=0) - crap[0] = complex(0, 0) + if x_corr: + crap[0] = complex(0, 0) # this only if fft1 is different than fft2 s_corr[i, :] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0))) # remove abnormal data @@ -478,7 +487,8 @@ def correlate(fft1_smoothed_abs, fft2, D, Nfft, dataS_t): crap[:Nfft2] = np.mean(corr[itime, :], axis=0) # linear average of the correlation crap[:Nfft2] = crap[:Nfft2] - np.mean(crap[:Nfft2]) # remove the mean in freq domain (spike at t=0) crap[-(Nfft2) + 1 :] = np.flip(np.conj(crap[1:(Nfft2)]), axis=0) - crap[0] = complex(0, 0) + if x_corr: + crap[0] = complex(0, 0) s_corr[istack, :] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0))) n_corr[istack] = len(itime) # number of windows stacks t_corr[istack] = tstart # save the time stamps @@ -1076,7 +1086,7 @@ def whiten_1D(timeseries, fft_para: ConfigParameters, n_taper): dt: The sampling space of the `data` freqmin: The lower frequency bound freqmax: The upper frequency bound - smooth_N: integer, it defines the half window length to smooth + smoothspect_N: integer, it defines the half window length to smooth n_taper, optional: integer, define the width of the taper in samples RETURNS: ---------------------- @@ -1103,10 +1113,10 @@ def whiten_1D(timeseries, fft_para: ConfigParameters, n_taper): spec_out[0:ix00] = 0.0 + 0.0j spec_out[ix11:] = 0.0 + 0.0j - if fft_para.smooth_N <= 1: + if fft_para.smoothspect_N <= 1: spec_out[ix00:ix11] = np.exp(1.0j * np.angle(spec_out[ix00:ix11])) else: - spec_out[ix00:ix11] /= moving_ave(np.abs(spec_out[ix00:ix11]), fft_para.smooth_N) + spec_out[ix00:ix11] /= moving_ave(np.abs(spec_out[ix00:ix11]), fft_para.smoothspect_N) x = np.linspace(np.pi / 2.0, np.pi, ix0 - ix00) spec_out[ix00:ix0] *= np.cos(x) ** 2 @@ -1129,7 +1139,7 @@ def whiten_2D(timeseries, fft_para: ConfigParameters, n_taper): dt: The sampling 
space of the `data` freqmin: The lower frequency bound freqmax: The upper frequency bound - smooth_N: integer, it defines the half window length to smooth + smoothspect_N: integer, it defines the half window length to smooth n_taper, optional: integer, define the width of the taper in samples RETURNS: ---------------------- @@ -1156,10 +1166,10 @@ def whiten_2D(timeseries, fft_para: ConfigParameters, n_taper): spec_out[:, 0:ix00] = 0.0 + 0.0j spec_out[:, ix11:] = 0.0 + 0.0j - if fft_para.smooth_N <= 1: + if fft_para.smoothspect_N <= 1: spec_out[:, ix00:ix11] = np.exp(1.0j * np.angle(spec_out[:, ix00:ix11])) else: - spec_out[:, ix00:ix11] /= moving_ave_2D(np.abs(spec_out[:, ix00:ix11]), fft_para.smooth_N) + spec_out[:, ix00:ix11] /= moving_ave_2D(np.abs(spec_out[:, ix00:ix11]), fft_para.smoothspect_N) x = np.linspace(np.pi / 2.0, np.pi, ix0 - ix00) spec_out[:, ix00:ix0] *= np.cos(x) ** 2 diff --git a/tests/data/acc/CIHEC__BHN___2022002.ms b/tests/data/acc/CIHEC__BHN___2022002.ms new file mode 100644 index 00000000..e2811b37 Binary files /dev/null and b/tests/data/acc/CIHEC__BHN___2022002.ms differ diff --git a/tests/test_cross_correlation.py b/tests/test_cross_correlation.py index a793b921..98da8dab 100644 --- a/tests/test_cross_correlation.py +++ b/tests/test_cross_correlation.py @@ -81,7 +81,10 @@ def get_inventory(self, timespan: DateTimeRange, station: Station) -> obspy.Inve @pytest.mark.parametrize("substack", [True, False]) @pytest.mark.parametrize("substack_len", [1, 2]) @pytest.mark.parametrize("inc_hours", [0, 24]) -def test_correlation(rm_resp: RmResp, cc_method: CCMethod, substack: bool, substack_len: int, inc_hours: int): +@pytest.mark.parametrize("dpath", ["./data/cc", "./data/acc"]) +def test_cross_correlation( + rm_resp: RmResp, cc_method: CCMethod, substack: bool, substack_len: int, inc_hours: int, dpath: str +): config = ConfigParameters() config.samp_freq = 1.0 config.rm_resp = rm_resp @@ -90,7 +93,7 @@ def test_correlation(rm_resp: RmResp, cc_method: CCMethod, substack: bool, subst if substack: config.substack = substack config.substack_len = substack_len * config.cc_len - path = os.path.join(os.path.dirname(__file__), "./data/cc") + path = os.path.join(os.path.dirname(__file__), dpath) raw_store = SCEDCS3DataStore(path, MockCatalogMock()) ts = raw_store.get_timespans() diff --git a/tests/test_noise_module.py b/tests/test_noise_module.py index 20e9777a..c3d08a24 100644 --- a/tests/test_noise_module.py +++ b/tests/test_noise_module.py @@ -63,10 +63,12 @@ def test_detrend(data: np.ndarray): @pytest.mark.parametrize("freq_norm", [FreqNorm.NO, FreqNorm.RMA]) @pytest.mark.parametrize("time_norm", [TimeNorm.ONE_BIT, TimeNorm.RMA]) -def test_noise_processing(time_norm: TimeNorm, freq_norm: FreqNorm): +@pytest.mark.parametrize("smoothspect_N", [10, 1]) +def test_noise_processing(time_norm: TimeNorm, freq_norm: FreqNorm, smoothspect_N: int): config = ConfigParameters() config.time_norm = time_norm config.freq_norm = freq_norm + config.smoothspect_N = smoothspect_N dataS = np.random.random([2, 500]) noise_processing(config, dataS) diff --git a/tests/test_whiten.py b/tests/test_whiten.py index 929ff9bf..f678b215 100644 --- a/tests/test_whiten.py +++ b/tests/test_whiten.py @@ -21,7 +21,7 @@ def whiten_original(data, fft_para: ConfigParameters): dt: The sampling space of the `data` freqmin: The lower frequency bound freqmax: The upper frequency bound - smooth_N: integer, it defines the half window length to smooth + smoothspect_N: integer, it defines the half window length to smooth 
freq_norm: whitening method between 'one-bit' and 'RMA' RETURNS: ---------------------- @@ -62,7 +62,7 @@ def whiten_original(data, fft_para: ConfigParameters): FFTRawSign[:, left:right] = np.exp(1j * np.angle(FFTRawSign[:, left:right])) elif fft_para.freq_norm == FreqNorm.RMA: for ii in range(data.shape[0]): - tave = moving_ave(np.abs(FFTRawSign[ii, left:right]), fft_para.smooth_N) + tave = moving_ave(np.abs(FFTRawSign[ii, left:right]), fft_para.smoothspect_N) FFTRawSign[ii, left:right] = FFTRawSign[ii, left:right] / tave # Right tapering: FFTRawSign[:, right:high] = np.cos(np.linspace(0.0, np.pi / 2.0, high - right)) ** 2 * np.exp( @@ -81,7 +81,7 @@ def whiten_original(data, fft_para: ConfigParameters): if fft_para.freq_norm == FreqNorm.PHASE_ONLY: FFTRawSign[left:right] = np.exp(1j * np.angle(FFTRawSign[left:right])) elif fft_para.freq_norm == FreqNorm.RMA: - tave = moving_ave(np.abs(FFTRawSign[left:right]), fft_para.smooth_N) + tave = moving_ave(np.abs(FFTRawSign[left:right]), fft_para.smoothspect_N) FFTRawSign[left:right] = FFTRawSign[left:right] / tave # Right tapering: FFTRawSign[right:high] = np.cos(np.linspace(0.0, np.pi / 2.0, high - right)) ** 2 * np.exp( @@ -107,7 +107,7 @@ def whiten1d(freq_norm: FreqNorm): fft_para.samp_freq = 1.0 fft_para.freqmin = 0.01 fft_para.freqmax = 0.2 - fft_para.smooth_N = 1 + fft_para.smoothspect_N = 1 fft_para.freq_norm = freq_norm data = np.random.random(1000) @@ -126,7 +126,7 @@ def whiten2d(freq_norm: FreqNorm): fft_para.samp_freq = 1.0 fft_para.freqmin = 0.01 fft_para.freqmax = 0.2 - fft_para.smooth_N = 1 + fft_para.smoothspect_N = 1 fft_para.freq_norm = freq_norm data = np.random.random((5, 1000)) diff --git a/tutorials/monitoring/attenuation_singlestation.ipynb b/tutorials/monitoring/attenuation_singlestation.ipynb index c526d73c..95c90b2b 100644 --- a/tutorials/monitoring/attenuation_singlestation.ipynb +++ b/tutorials/monitoring/attenuation_singlestation.ipynb @@ -380,9 +380,9 @@ "outputs": [], "source": [ "# get symmetric waveforms and determine the measuring window based on noise level (here is using 3 times of mad)\n", - "indx = npts // 2 # half-side number of points\n", - "data_sym=np.ndarray((nfreq,indx+1)) # two-side averaged stack CF\n", - "fmsv_mean=np.ndarray((fnum,nfreq+1,indx+1))\n", + "half_npts = npts // 2 # half-side number of points\n", + "data_sym=np.ndarray((nfreq,half_npts+1)) # two-side averaged stack CF\n", + "fmsv_mean=np.ndarray((fnum,nfreq+1,half_npts+1))\n", "\n", "# noise level setting\n", "ratio=config_monito.ratio\n", @@ -393,7 +393,7 @@ " for fb in range(nfreq):\n", " fmin=config_monito.freq[fb]\n", " fmax=config_monito.freq[fb+1] # stack positive and negative lags \n", - " sym=get_symmetric(msv_mean[aa][fb+1],indx)\n", + " sym=get_symmetric(msv_mean[aa][fb+1],half_npts)\n", " data_sym[fb]=sym\n", " Val_mad=mad(sym)\n", " level[aa][fb]=Val_mad*ratio\n", @@ -401,10 +401,10 @@ " for pt in range(len(sym)):\n", " if (sym[pt] < float(level[aa][fb])):\n", " twinbe[aa][fb][0]=int((1/fmin)*5)\n", - " twinbe[aa][fb][1]=int(msv[aa][0][0][indx+pt])\n", + " twinbe[aa][fb][1]=int(msv[aa][0][0][half_npts+pt])\n", " print(aa,fb,pt,sym[pt],level[aa][fb],twinbe[aa][fb])\n", " break\n", - " fmsv_mean[aa]=[msv[aa][0][0][indx:],data_sym[0],data_sym[1],data_sym[2]]\n", + " fmsv_mean[aa]=[msv[aa][0][0][half_npts:],data_sym[0],data_sym[1],data_sym[2]]\n", " plot_fmsv_waveforms(config_monito.freq,fmsv_mean[aa],fname[aa],level[aa],twinbe[aa])\n", "\n" ] @@ -431,39 +431,42 @@ "\n", "The optimal value of the parameters $l$ and $b$ is 
determined by minimizing the sum of squared residuals (SSR) between *Eobs* and *Esyn* through a grid search (see Hirose et al., 2019 and 2022).
\n", "\\begin{gather*}\n", - "SSR=\\sum\\limits_{i=1}^{6} \\sum\\limits_{j=1}^{N} (log_{10}E_{obs}(t_i,j) - log_{10}E_{syn}(t_i,j))^2\n", + "SSR=\\sum\\limits_{i=1}^{Nwindows} \\sum\\limits_{j=1}^{Npairs} (E_{obs}(t_i,j) - E_{syn}(t_i,j))^2\n", "\\end{gather*}\n", - "*Here i is the number of time windows and j is the number of station pairs*." + "*, where i is the number of time windows and j is the number of station pairs*.\n", + "\n", + "\n", + "Here, referring to Hirose et al. (2019, 2022), the Nwindow is fixed at 6 when choosing a multi-window calculation. \n" ] }, { "cell_type": "code", "execution_count": null, - "id": "35c58624", + "id": "952928b5", "metadata": {}, "outputs": [], "source": [ - "def plot_fitting_curves(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twin):\n", - " numb=len(intrinsic_b)\n", - " plt.figure(figsize=(8,2))\n", - " for nb in range(numb):\n", - " \n", - " plt.yscale('log', base=10)\n", - " #plt.xlim(0,120) \n", - " pymin=np.min(Eobs[nb][:-2]/2)\n", - " pymax=np.max(Eobs[nb][:-2]*2)\n", - " plt.ylim( pymin , pymax )\n", - " plt.plot( tt, Eobs[nb], \"k-\", linewidth=0.5)\n", - " plt.plot( tt, Esyn[nb], \"b-\", linewidth=1)\n", - " plt.plot([twin[0],twin[0],twin[-1],twin[-1],twin[0]],[pymin, pymax,pymax,pymin,pymin],\"r\", linewidth=2)\n", - "\n", - " plt.title(\"%s %.2fkm @%4.2f-%4.2f Hz, mean_free: %.2f b: %.2f~%.2f\"\n", - " % ( fname,dist,fmin,fmax,mean_free,y[0],y[-1]))\n", - " plt.xlabel(\"Time [s]\")\n", - " plt.ylabel(\"Energy density Amplitude\")\n", - " plt.tight_layout() \n", - " plt.show()\n", - " " + "def plot_fmsv_multiwindows(freq,wav,fname,noise_level,twin,pretwin):\n", + " nfreq = len(freq) - 1\n", + " fig, ax = plt.subplots(1,nfreq, figsize=(12,2), sharex=False)\n", + "\n", + " for fb in range(nfreq):\n", + " fmin=freq[fb]\n", + " fmax=freq[fb+1]\n", + " absy=1 #max(wav[fb], key=abs)\n", + " ax[fb].plot([wav[0][0],wav[0][-1]],[noise_level[fb],noise_level[fb]],c='blue',marker='.',ls='--', linewidth=2)\n", + " for ncoda in range(len(twin[fb])):\n", + " ax[fb].plot([twin[fb][ncoda][0],twin[fb][ncoda][0]],[-0.1,absy],c='red',ls='-', linewidth=0.25)\n", + " ax[fb].plot([twin[fb][ncoda][1],twin[fb][ncoda][1]],[-0.1,absy],c='red',ls='-', linewidth=0.25)\n", + " ax[fb].plot([pretwin[fb][0],pretwin[fb][0]],[-0.1,absy],c='orange',marker='.',ls='--', linewidth=2)\n", + " ax[fb].plot([pretwin[fb][1],pretwin[fb][1]],[-0.1,absy],c='orange',marker='.',ls='--', linewidth=2)\n", + " ax[fb].set_yscale('log', base=10)\n", + " ax[fb].plot(wav[0],wav[fb+1], \"k-\", linewidth=0.5)\n", + " ax[fb].set_xlabel(\"Time [s]\")\n", + " ax[fb].set_ylabel(\"Amplitude in log-scale\")\n", + " ax[fb].set_title( \"%s @%4.2f-%4.2f Hz\" % ( fname,fmin,fmax ) )\n", + " fig.tight_layout()\n", + " plt.show()" ] }, { @@ -475,8 +478,30 @@ "source": [ "cvel=np.full(nfreq, config_monito.cvel) # Rayleigh wave velocities over the freqency bands\n", "mfpx=np.zeros(1) # mean_free_path search array\n", - "intby=np.zeros(30) # intrinsic_b search array\n", - "config_monito.intb_interval_base=0.01 # interval base for a grid-searching process\n" + "intby=np.zeros(30) # intrinsic_b search array\n", + "nwindows=1 # number of sliced coda windows for fitting (1 or 6 fixed)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81465475", + "metadata": {}, + "outputs": [], + "source": [ + "new_twin = np.zeros((fnum,nfreq,nwindows,2))\n", + "if nwindows == 1:\n", + " for aa in range(fnum):\n", + " for fb in range(nfreq):\n", + " new_twin[aa,fb,0]=[twinbe[aa,fb,0],twinbe[aa,fb,1]]\n", + 
"else:\n", + " for aa in range(fnum):\n", + " for fb in range(nfreq):\n", + " tb=twinbe[aa,fb,0]\n", + " te=twinbe[aa,fb,1]\n", + " new_twin[aa,fb] = window_determine(tb,te)\n", + "\n", + " plot_fmsv_multiwindows(config_monito.freq,fmsv_mean[aa],fname[aa],level[aa],new_twin[aa], twinbe[aa])" ] }, { @@ -490,17 +515,22 @@ "SSR_final=np.zeros((len(mfpx),len(intby)))\n", "SSR=np.zeros((nfreq,len(mfpx),len(intby)))\n", "\n", + "\n", "for fb in range(nfreq):\n", " fmin=config_monito.freq[fb]\n", " fmax=config_monito.freq[fb+1]\n", " c=cvel[fb]\n", - " SSR_final[:][:]=0.\n", + " \n", + " fmsv_mean_single = np.zeros((1,half_npts+1))\n", + " fmsv_mean_single[0] = fmsv_mean[0,fb+1,:] # get the mean-squared value on the targeted frequency band\n", + " \n", + " coda_single_band = new_twin[:,fb,:]\n", " \n", " # parameters for getting the sum of squared residuals (SSR) between Eobs and Esyn \n", - " para={ 'fb':fb, 'vdist':vdist, 'npts':npts, 'dt':dt, 'cvel':c, \\\n", - " 'mfp':mfpx, 'intb':intby,'twin':twinbe, 'fmsv':fmsv_mean }\n", + " para={ 'vdist':vdist, 'npts':npts, 'dt':dt, 'cvel':c, \\\n", + " 'mfp':mfpx, 'intb':intby, 'twin':coda_single_band, 'fmsv':fmsv_mean_single }\n", " # call function get_SSR\n", - " SSR_final, mfpx, intby = get_SSR(fnum, para )\n", + " SSR_final, mfpx, intby = get_SSR_codawindows(para)\n", " \n", " SSR[fb]=SSR_final\n" ] @@ -520,17 +550,17 @@ "metadata": {}, "outputs": [], "source": [ - "def plot_fitting_result(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twin,fmin,fmax):\n", + "def plot_singwindow_fitting_result(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twin,fmin,fmax,win_num):\n", " plt.figure(figsize=(4,2))\n", " plt.yscale('log', base=10)\n", " \n", " intrinsic_Q=(2.0*np.pi*((fmin+fmax)/2))/intrinsic_b\n", " \n", - " pymax=np.max(Eobs[:-2]*5)\n", + " pymax=np.max(Eobs[win_num-1,:-2]*5)\n", " pymin=10**(-4)\n", " plt.ylim( pymin , pymax )\n", - " plt.plot( tt, Eobs, \"k-\", linewidth=1)\n", - " plt.plot( tt, Esyn, \"b--\", linewidth=1)\n", + " plt.plot( tt, Eobs[win_num-1], \"k-\", linewidth=1)\n", + " plt.plot( tt, Esyn[win_num-1], \"b--\", linewidth=1)\n", " plt.plot([twin[0],twin[0],twin[-1],twin[-1],twin[0]],[pymin, pymax,pymax,pymin,pymin],\"r\", linewidth=2)\n", "\n", " plt.title(\"%s %.1fkm @%4.2f-%4.2f Hz, \\nintrinsic b: %.2f, intrinsic Q: %.2f\"\n", @@ -539,7 +569,30 @@ " plt.ylabel(\"Energy density Amp\")\n", " plt.tight_layout() \n", " plt.show()\n", - " " + " \n", + "def plot_multiwindow_fitting_result(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twin,fmin,fmax,win_num):\n", + " nwindows=twin.shape[0]\n", + " \n", + " pymax=np.max(Eobs[:-2]*5)\n", + " pymin=10**(-6)\n", + " fig, ax= plt.subplots(nwindows, figsize=(6,8))\n", + " for k in range(nwindows):\n", + " ax[k].set_yscale('log', base=10)\n", + " ax[k].set_ylim( pymin , pymax )\n", + " ax[k].plot( tt, Eobs[k], \"k-\", linewidth=1)\n", + " ax[k].plot( tt, Esyn[k], \"b--\", linewidth=1)\n", + " \n", + " ax[k].plot([twin[k,0],twin[k,0]], [0, pymax], 'r--', label=f'[{twin[k,0]:.2f}, {twin[k,1]:.2f}] sec')\n", + " ax[k].plot([twin[k,1], twin[k,1]], [0, pymax], 'r--')\n", + " ax[k].legend(loc='upper right')\n", + " # ax[k].set_xlim(0, tt[-20])\n", + " ax[0].set_title(\"Window no. 
%d %s @%4.2f-%4.2f Hz, intrinsic b: %.2f, mean free path: %.2f\" \\\n", + " % ( win_num, fname,fmin,fmax,intrinsic_b, mean_free ) )\n", + " ax[-1].set_xlabel(\"Lag time (sec)\")\n", + " ax[-1].set_ylabel(\"Energy density Amp\")\n", + " plt.tight_layout() \n", + " plt.show()\n", + " " ] }, { @@ -553,22 +606,33 @@ "result_intb=np.ndarray((nfreq))\n", "result_mfp=np.ndarray((nfreq))\n", "\n", - "Eobs=np.ndarray((nfreq,npts//2+1))\n", - "Esyn=np.ndarray((nfreq,npts//2+1))\n", - "aa=0\n", - "r=np.take(vdist[aa],0)\n", + "Eobs=np.ndarray((fnum, nfreq, nwindows, half_npts+1))\n", + "Esyn=np.ndarray((fnum, nfreq, nwindows, half_npts+1))\n", + "scaling_amp=np.ndarray((nfreq, nwindows))\n", + "\n", "for fb in range(nfreq): \n", " fmin=config_monito.freq[fb]\n", " fmax=config_monito.freq[fb+1]\n", - "\n", - " # parameters for getting optimal value from the sum of squared residuals (SSR) between Eobs and Esyn \n", - " para={ 'fb':fb, 'fmin':fmin, 'fmax':fmax, 'vdist':vdist, 'npts':npts, 'dt':dt, 'cvel':c, 'filenum':aa, \\\n", - " 'mfp':mfpx, 'intb':intby,'twin':twinbe, 'fmsv':fmsv_mean, 'SSR':SSR , 'sta':target_pair}\n", - " # call function get_optimal\n", - " result_intb[fb], result_mfp[fb], Eobs[fb], Esyn[fb] = get_optimal_Esyn(fnum,para)\n", + " c=cvel[fb]\n", + " \n", + " fmsv_mean_single = np.zeros((1,half_npts+1))\n", + " fmsv_mean_single[0] = fmsv_mean[0,fb+1,:] # get the mean-squared value on the targeted frequency band\n", + " \n", + " coda_single_band = new_twin[:,fb,:]\n", + " # parameters for getting the sum of squared residuals (SSR) between Eobs and Esyn \n", + " para={ 'fmin':fmin, 'fmax':fmax, 'vdist':vdist, 'npts':npts, 'dt':dt, 'cvel':c, \\\n", + " 'mfp':mfpx, 'intb':intby, 'twin':coda_single_band, 'fmsv':fmsv_mean_single, \\\n", + " 'SSR':SSR[fb] , 'sta':fname }\n", + " # call function get_SSR\n", + " result_intb[fb], result_mfp[fb], Eobs[fnum-1, fb], Esyn[fnum-1, fb], scaling_amp[fb] = get_optimal_Esyn(para)\n", " \n", - " plot_fitting_result(result_mfp[fb], result_intb[fb], fmsv_mean[aa][0][:],\n", - " Eobs[fb], Esyn[fb], target_pair, vdist[aa], twinbe[aa][fb], fmin, fmax)\n" + " # plotting fitting results \n", + " if nwindows==1:\n", + " plot_singwindow_fitting_result(result_mfp[fb], result_intb[fb], fmsv_mean[fnum-1,0,:], Eobs[fnum-1, fb], Esyn[fnum-1, fb],\n", + " fname, vdist, coda_single_band[0], fmin, fmax, nwindows) \n", + " else:\n", + " plot_multiwindow_fitting_result(result_mfp[fb], result_intb[fb], fmsv_mean[fnum-1,0,:], Eobs[fnum-1, fb], Esyn[fnum-1, fb],\n", + " fname, vdist, coda_single_band[0], fmin, fmax, nwindows) \n" ] }, { diff --git a/tutorials/monitoring/monitoring_demo.ipynb b/tutorials/monitoring/monitoring_demo.ipynb index bb51c663..0a499c09 100644 --- a/tutorials/monitoring/monitoring_demo.ipynb +++ b/tutorials/monitoring/monitoring_demo.ipynb @@ -138,7 +138,7 @@ "\n", "# TEMPORAL and SPECTRAL NORMALISATION\n", "config.freq_norm= FreqNorm.RMA # choose between \"rma\" for a soft whitenning or \"no\" for no whitening. 
Pure whitening is not implemented correctly at this point.\n", - "config.smoothspect_N = 10 # moving window length to smooth spectrum amplitude (points)\n", + "config.smoothspect_N = 1 # moving window length to smooth spectrum amplitude (points)\n", " # here, choose smoothspect_N for the case of a strict whitening (e.g., phase_only)\n", "\n", "config.time_norm = TimeNorm.ONE_BIT # 'no' for no normalization, or 'rma', 'one_bit' for normalization in time domain,\n", @@ -319,25 +319,25 @@ "# --- parameters for measuring velocity changes ---\n", "# pre-defined group velocity to window direct and code waves\n", "config_monito.vmin = 2.0 # minimum velocity of the direct waves -> start of the coda window\n", - "config_monito.lwin = 10.0 # window length in sec for the coda waves\n", + "config_monito.lwin = 20.0 # window length in sec for the coda waves\n", "\n", "# basic parameters\n", - "config_monito.freq = [0.25, 0.5, 1.0] # targeted frequency band for waveform monitoring\n", + "config_monito.freq = [0.25, 0.5, 2.0] # targeted frequency band for waveform monitoring\n", "nfreq = len(config_monito.freq) - 1\n", "config_monito.onelag = False # make measurement one one lag or two\n", "config_monito.norm_flag = True # whether to normalize the cross-correlation waveforms\n", "config_monito.do_stretch = True # use strecthing method or not\n", "\n", "# parameters for stretching method\n", - "config_monito.epsilon = 0.02 # limit for dv/v (in decimal)\n", - "config_monito.nbtrial = 100 # number of increment of dt [-epsilon,epsilon] for the streching\n", + "config_monito.epsilon = 0.05 # limit for dv/v (in decimal)\n", + "config_monito.nbtrial = 200 # number of increment of dt [-epsilon,epsilon] for the streching\n", "\n", "# coda window \n", "config_monito.coda_tbeg = 2.0 # begin time (sec) of the coda window in lag time\n", "config_monito.coda_tend = 8.0 # end time (sec) of the coda window in lag time\n", "\n", "# --- parameters for measuring attenuation ---\n", - "config_monito.smooth_winlen = 10.0 # smoothing window length\n", + "config_monito.smooth_winlen = 5.0 # smoothing window length\n", "config_monito.cvel = 2.6 # Rayleigh wave velocities over the freqency bands\n", "config_monito.atten_tbeg = 2.0\n", "config_monito.atten_tend = 10.0\n", @@ -663,7 +663,7 @@ "plt.title('Correlation coefficient to the reference')\n", "plt.ylabel('Correlation coefficient')\n", "plt.xlabel('Window number')\n", - "plt.legend()" + "plt.legend(loc='best')" ] }, { @@ -765,7 +765,7 @@ "ax[1].legend(('error_positive-lag','error_negative-lag'),loc='upper right', bbox_to_anchor=(1.35,1))\n", "ax[0].set_title('Estimated dv/v')\n", "ax[1].set_title('Estimated error')\n", - "ax[1].set_xlabel('Window number');ax[1].set_ylabel('%');ax[1].set_ylim(0,250)\n", + "ax[1].set_xlabel('Window number');ax[1].set_ylabel('%');\n", "ax[0].set_ylabel('%');plt.tight_layout()" ] }, @@ -1015,15 +1015,37 @@ "metadata": {}, "outputs": [], "source": [ - "twinbe=np.ndarray((1,1,2))\n", - "twinbe[0,0,0]=config_monito.atten_tbeg\n", - "twinbe[0,0,1]=config_monito.atten_tend\n", + "fnum=1\n", + "nfreq_target=1\n", + "twinbe=np.ndarray((fnum,nfreq_target,2))\n", + "twinbe[:,:,0]=config_monito.atten_tbeg\n", + "twinbe[:,:,1]=config_monito.atten_tend\n", "\n", "cvel=config_monito.cvel # Rayleigh wave velocities over the freqency bands\n", "vdist=np.zeros((1)) # station distance\n", "mfpx=np.zeros(1) # mean_free_path search array\n", - "intby=np.zeros(80) # intrinsic_b search array\n", - "config_monito.intb_interval_base=0.01 # interval base for a 
grid-searching process" + "intby=np.zeros(40) # intrinsic_b search array\n", + "nwindows=1 # number of sliced coda windows for fitting (1 or 6 fixed)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "new_twin = np.zeros((fnum,nwindows,2))\n", + "print(new_twin.shape)\n", + "if nwindows == 1:\n", + " for aa in range(fnum):\n", + " for fb in range(nfreq_target):\n", + " new_twin[aa,0]=[twinbe[aa,fb,0],twinbe[aa,fb,1]]\n", + "else:\n", + " for aa in range(fnum):\n", + " for fb in range(nfreq_target):\n", + " tb=twinbe[aa,fb,0]\n", + " te=twinbe[aa,fb,1]\n", + " new_twin[aa] = window_determine(tb,te)" ] }, { @@ -1034,19 +1056,24 @@ "source": [ "# getting the sum of squared residuals (SSR) between Eobs and Esyn \n", "SSR_final=np.zeros((len(mfpx),len(intby)))\n", - "SSR=np.zeros((nwin,1,len(mfpx),len(intby)))\n", + "SSR=np.zeros((nwin,len(mfpx),len(intby)))\n", + "\n", + "c=cvel\n", + "coda_single_band = new_twin[:,:,:] # coda window for the targeted frequency band\n", "\n", "for ntw in range(nwin):\n", - " data=np.zeros(shape=(1,2,half_npts+1))\n", - " data[0,:,:]=fmsv_mean[ntw]\n", - " #print(data.shape,fmsv_mean[ntw].shape)\n", + "\n", + " # get the mean of the mean squared value (MSV) for the coda window\n", + " fmsv_mean_single = np.zeros((1,half_npts+1))\n", + " fmsv_mean_single[0] = fmsv_mean[ntw,1,:]\n", + " \n", " # parameters for getting the sum of squared residuals (SSR) between Eobs and Esyn \n", - " para={ 'fb':0 , 'vdist':vdist, 'npts':npts_one_segmt, 'dt':dt, 'cvel':cvel, \\\n", - " 'mfp':mfpx, 'intb':intby,'twin':twinbe, 'fmsv':data }\n", + " para={ 'vdist':vdist, 'npts':npts_one_segmt, 'dt':dt, 'cvel':cvel, \\\n", + " 'mfp':mfpx, 'intb':intby, 'twin':coda_single_band, 'fmsv':fmsv_mean_single }\n", " # call function get_SSR\n", - " SSR_final, mfpx, intby = get_SSR(1, para )\n", + " SSR_final, mfpx, intby = get_SSR_codawindows(para)\n", "\n", - " SSR[ntw][0]=SSR_final\n", + " SSR[ntw]=SSR_final\n", "print(SSR.shape)" ] }, @@ -1056,25 +1083,49 @@ "metadata": {}, "outputs": [], "source": [ - "def plot_fitting_result(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twind,fmin,fmax,win_num):\n", - " plt.figure(figsize=(6,2))\n", + "def plot_singwindow_fitting_result(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twin,fmin,fmax,win_num):\n", + " plt.figure(figsize=(4,2))\n", " plt.yscale('log', base=10)\n", - "\n", - " pymax=np.max(Eobs[:-2]*5)\n", - " pymin=10**(-3)\n", - " print( pymin , pymax )\n", + " \n", + " intrinsic_Q=(2.0*np.pi*((fmin+fmax)/2))/intrinsic_b\n", + " \n", + " pymax=np.max(Eobs[win_num-1,:-2]*5)\n", + " pymin=10**(-4)\n", " plt.ylim( pymin , pymax )\n", - " plt.plot( tt, Eobs, \"k-\", linewidth=1)\n", - " plt.plot( tt, Esyn, \"b--\", linewidth=1)\n", - " plt.plot([twind[0],twind[0],twind[-1],twind[-1],twind[0]],[pymin, pymax,pymax,pymin,pymin],\"r\", linewidth=2)\n", + " plt.plot( tt, Eobs[win_num-1], \"k-\", linewidth=1)\n", + " plt.plot( tt, Esyn[win_num-1], \"b--\", linewidth=1)\n", + " plt.plot([twin[0],twin[0],twin[-1],twin[-1],twin[0]],[pymin, pymax,pymax,pymin,pymin],\"r\", linewidth=2)\n", "\n", - " plt.title(\"%s @%4.2f-%4.2f Hz, intrinsic b: %.2f, Window no. 
%d\" \\\n", - " % ( fname,fmin,fmax,intrinsic_b, win_num))\n", - " plt.xlabel(\"Lag time (sec)\")\n", + " plt.title(\"%s %.1fkm @%4.2f-%4.2f Hz, \\nintrinsic b: %.2f, intrinsic Q: %.2f\"\n", + " % ( fname,dist,fmin,fmax,intrinsic_b, intrinsic_Q))\n", + " plt.xlabel(\"Time [s]\")\n", " plt.ylabel(\"Energy density Amp\")\n", " plt.tight_layout() \n", " plt.show()\n", - " " + " \n", + "def plot_multiwindow_fitting_result(mean_free,intrinsic_b,tt,Eobs,Esyn,fname,dist,twin,fmin,fmax,win_num):\n", + " nwindows=twin.shape[0]\n", + " \n", + " pymax=np.max(Eobs[:-2]*5)\n", + " pymin=10**(-4)\n", + " fig, ax= plt.subplots(nwindows, figsize=(6,8))\n", + " for k in range(nwindows):\n", + " ax[k].set_yscale('log', base=10)\n", + " ax[k].set_ylim( pymin , pymax )\n", + " ax[k].plot( tt, Eobs[k], \"k-\", linewidth=1)\n", + " ax[k].plot( tt, Esyn[k], \"b--\", linewidth=1)\n", + " \n", + " ax[k].plot([twin[k,0],twin[k,0]], [0, pymax], 'r--', label=f'[{twin[k,0]:.2f}, {twin[k,1]:.2f}] sec')\n", + " ax[k].plot([twin[k,1], twin[k,1]], [0, pymax], 'r--')\n", + " ax[k].legend(loc='upper right')\n", + " # ax[k].set_xlim(0, tt[-20])\n", + " ax[0].set_title(\"Window no. %d %s @%4.2f-%4.2f Hz, intrinsic b: %.2f, mean free path: %.2f\" \\\n", + " % ( win_num, fname,fmin,fmax,intrinsic_b, mean_free ) )\n", + " ax[-1].set_xlabel(\"Lag time (sec)\")\n", + " ax[-1].set_ylabel(\"Energy density Amp\")\n", + " plt.tight_layout() \n", + " plt.show()\n", + " " ] }, { @@ -1091,33 +1142,39 @@ "outputs": [], "source": [ "# getting the optimal value from the SSR\n", - "result_intb=np.zeros((nwin,1))\n", - "result_mfp=np.zeros((nwin, 1))\n", + "result_intb=np.zeros((nwin))\n", + "result_mfp=np.zeros((nwin))\n", "\n", - "Eobs=np.ndarray((1,half_npts))\n", - "Esyn=np.ndarray((1,half_npts))\n", - "aa=0\n", - "r=np.take(vdist[aa],0) \n", + "Eobs=np.ndarray((fnum, nwin, nwindows, half_npts+1))\n", + "Esyn=np.ndarray((fnum, nwin, nwindows, half_npts+1))\n", + "scaling_amp=np.ndarray((nwin,nwindows))\n", "\n", "fmin=freq1\n", "fmax=freq2\n", "wfcen=2.0*np.pi*((freq1+freq2)/2.0)\n", "\n", "for ntw in range(nwin):\n", + " \n", + " fmsv_mean_single = np.zeros((fnum, half_npts+1))\n", + " fmsv_mean_single[0] = fmsv_mean[ntw,1,:]\n", "\n", - " data=np.zeros(shape=(1,2,half_npts+1))\n", - " data[0,:,:]=fmsv_mean[ntw]\n", - " # parameters for getting optimal value from the sum of squared residuals (SSR) between Eobs and Esyn \n", - " para={ 'fb':0, 'fmin':fmin, 'fmax':fmax, 'vdist':vdist, 'npts':npts_one_segmt, 'dt':dt, 'cvel':cvel, 'filenum':aa, \\\n", - " 'mfp':mfpx, 'intb':intby, 'twin':twinbe, 'fmsv':data, 'SSR':SSR[ntw] , 'sta':sta_pair}\n", - " # call function get_optimal\n", - " result_intb[ntw], result_mfp[ntw], Eobs, Esyn = get_optimal_Esyn(1,para)\n", - " # plotting fitting results\n", + " # parameters for getting the sum of squared residuals (SSR) between Eobs and Esyn \n", + " para={ 'fmin':fmin, 'fmax':fmax, 'vdist':vdist, 'npts':npts_one_segmt, 'dt':dt, 'cvel':cvel, \\\n", + " 'mfp':mfpx, 'intb':intby, 'twin':coda_single_band, 'fmsv':fmsv_mean_single, \\\n", + " 'SSR':SSR[ntw] , 'sta':sta_pair }\n", + " # call function get_SSR\n", + " result_intb[ntw], result_mfp[ntw], Eobs[fnum-1, ntw], Esyn[fnum-1, ntw], scaling_amp[ntw] = get_optimal_Esyn(para)\n", " if ntw == 32:\n", - " plot_fitting_result(result_mfp,result_intb[ntw],data[0,0,:], \n", - " Eobs,Esyn,sta_pair,vdist[0],twinbe[0][0],fmin,fmax,ntw)\n", - " \n", - "intQ=np.zeros((nwin,1)) \n", + " # plotting fitting results \n", + " if nwindows==1:\n", + " 
plot_singwindow_fitting_result(result_mfp[fb], result_intb[fb], \n", + " fmsv_mean[fnum-1,0,:], Eobs[fnum-1, fb], Esyn[fnum-1, fb],\n", + " sta_pair, vdist, coda_single_band[0], fmin, fmax, nwindows) \n", + " else:\n", + " plot_multiwindow_fitting_result(result_mfp[fb], result_intb[fb], \n", + " fmsv_mean[fnum-1,0,:], Eobs[fnum-1, fb], Esyn[fnum-1, fb],\n", + " sta_pair, vdist, coda_single_band[0], fmin, fmax, nwindows) \n", + "intQ=np.zeros((nwin)) \n", "intQ=wfcen/result_intb\n" ] }, @@ -1170,7 +1227,7 @@ "# Restore calendar time from cc_time array \n", "#cal_time=win_time[:nwin]\n", "cal_time=win_time[:len(win_time)//3]\n", - "print(len(cal_time),results_dvv.shape,result_intb[:,0].shape, intQ.shape )\n" + "print(len(cal_time),results_dvv.shape,result_intb[:].shape, intQ.shape )\n" ] }, { @@ -1187,9 +1244,9 @@ "'time': cal_time,\n", "'dvv': results_dvv,\n", "'err': results_err,\n", - "'int_b': result_intb[:,0],\n", + "'int_b': result_intb[:],\n", "'wfcen': np.full((nwin),wfcen),\n", - "'Q': intQ[:,0],\n", + "'Q': intQ[:],\n", "}\n", "\n", "df=pd.DataFrame(data)\n", @@ -1218,11 +1275,11 @@ "ax[1].set_title('Average dv/v')\n", "ax[1].set_ylabel('%')\n", "\n", - "ax[2].plot_date(t, result_intb[:,0], '.-', label='intrinsic b')\n", + "ax[2].plot_date(t, result_intb[:], '.-', label='intrinsic b')\n", "ax[2].set_title('Estimated intrinsic absorption parameter b')\n", "ax[2].set_ylabel('intrinsic b')\n", "\n", - "ax[3].plot_date(t,intQ[:,0],'.-', label='Q')\n", + "ax[3].plot_date(t,intQ[:],'.-', label='Q')\n", "ax[3].set_title('Estimated Q')\n", "ax[3].set_ylabel('Q value')\n", "ax[3].set_xlabel('Time')\n",