From e646d20da6f351609e5468842d1faaffa0c1e357 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Mon, 26 Aug 2024 13:27:12 -0400 Subject: [PATCH 01/14] initial code --- jwst/refpix/optimized_convolution.py | 197 ++++++++++++++++++++++ jwst/refpix/reference_pixels.py | 233 ++++++++++++++++++++++----- jwst/refpix/refpix_step.py | 33 +++- 3 files changed, 421 insertions(+), 42 deletions(-) create mode 100644 jwst/refpix/optimized_convolution.py diff --git a/jwst/refpix/optimized_convolution.py b/jwst/refpix/optimized_convolution.py new file mode 100644 index 0000000000..c5a54e446a --- /dev/null +++ b/jwst/refpix/optimized_convolution.py @@ -0,0 +1,197 @@ +# +# Module for using Reference Pixels to improve the 1/f noise, to be +# used be only for non-IRS2 data +# + +import logging +import numpy as np + +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) + + +def make_kernels(conv_kernel_model, detector, gausssmooth, halfwith): + """ + Make convolution kernels reference file's Fourier coefficients. + + Parameters: + ----------- + + conv_kernel_model : `~jwst.datamodels.ConvKernelModel` + Data model containing the Fourier coefficients from the reference files + + detector : str + Name of the detector of the input data + + gausssmooth : int + Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients + + halfwith : int + Half-width of convolution kernel to build from reference file's coefficients + + Returns: + -------- + + kernel_left: list + Contains the kernels appropriate for convolution with the left reference pixels + + kernel_right: list + Contains the kernels appropriate for convolution with the right reference pixels + + """ + + gamma, zeta = get_conv_kernel_coeffs(conv_kernel_model, detector) + if gamma is None or zeta is None: + log.info('Optimized convolution kernel coefficients NOT found for detector ', detector) + return None + + kernel_left = [] + kernel_right = [] + for chan in range(gamma.shape[0]): + n = len(gamma[chan]) - 1 + kernel_left = np.fft.fftshift(np.fft.irfft(gamma[chan]))[n - dn:n + dn + 1] + kernel_right = np.fft.fftshift(np.fft.irfft(zeta[chan]))[n - dn:n + dn + 1] + + x = np.arange(-dn, dn + 1) + window = np.exp(-x ** 2 / (2 * gausssmooth ** 2)) + window /= np.sum(window) + + kernel_right = np.convolve(kernel_right, window, mode='same') + kernel_left = np.convolve(kernel_left, window, mode='same') + + kernel_right += [kernel_right] + kernel_left += [kernel_left] + + return kernel_left, kernel_right + + +def get_conv_kernel_coeffs(conv_kernel_model, detector): + """ + Get the convolution kernels coefficients from the reference file + + Parameters: + ----------- + + conv_kernel_model : `~jwst.datamodels.ConvKernelModel` + Data model containing the Fourier coefficients from the reference files + + detector : str + Name of the detector of the input data + + Returns: + -------- + + gamma: numpy array + Fourier coefficients + + zeta: numpy array + Fourier coefficients + """ + + conv_kernel_model = conv_kernel_model.to_flat_dict() + gamma, zeta = None, None + for det in conv_kernel_model: + if det == detector: + gamma = conv_kernel_model[det]['gamma'] + zeta = conv_kernel_model[det]['zeta'] + break + return gamma, zeta + + +def apply_conv_kernel(datamodel, conv_kernel_model, sigreject=4, gausssmooth=1, halfwith=30): + """ + Apply the convolution kernel. 
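# --- Illustrative aside (not part of the patch): a standalone sketch of the
# kernel construction that make_kernels performs above -- inverse real FFT of
# one channel's Fourier coefficients, truncation to the central
# 2*halfwidth + 1 samples, and Gaussian low-pass smoothing. build_kernel and
# the random gamma_chan below are stand-ins for demonstration only.
import numpy as np

def build_kernel(coeffs, halfwidth=30, gaussmooth=1):
    n = len(coeffs) - 1
    # Spatial-domain kernel, keeping only the central samples around the peak.
    kernel = np.fft.fftshift(np.fft.irfft(coeffs))[n - halfwidth:n + halfwidth + 1]
    # Gaussian window acting as a low-pass filter on the truncated kernel.
    x = np.arange(-halfwidth, halfwidth + 1)
    window = np.exp(-x ** 2 / (2 * gaussmooth ** 2))
    window /= window.sum()
    return np.convolve(kernel, window, mode='same')

rng = np.random.default_rng(0)
gamma_chan = rng.normal(size=1025) + 1j * rng.normal(size=1025)
print(build_kernel(gamma_chan).shape)  # (61,)
# --- end aside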
+ + Parameters: + ----------- + + datamodel : `~jwst.datamodel` + Data model containing the data to be corrected + + conv_kernel_model : `~jwst.datamodels.ConvKernelModel` + Data model containing the Fourier coefficients from the reference files + + sigreject: int + Number of sigmas to reject as outliers + + gausssmooth : int + Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients + + halfwith : int + Half-width of convolution kernel to build from reference file's coefficients + + Returns: + -------- + + datamodel : `~jwst.datamodel` + Data model with convolution + """ + + data = datamodel.data.copy()[0, :, :, :] + data = data.astype(float) + npix = data.shape[-1] + detector = datamodel.meta.instrument.detector + + kernels_l, kernels_r = make_kernels(conv_kernel_model, detector, gausssmooth, halfwith) + + nchan = len(kernels_l) + + # The subtraction below is needed to flag outliers in the reference pixels. + zeroim = data[0].astype(float) + data -= zeroim[np.newaxis, :, :] + + for i in range(data.shape[0]): + L = data[i, :, :4] + R = data[i, :, -4:] + + # Find the approximate standard deviations of the reference pixels + # using an outlier-robust median approach. Mask pixels that differ + # by more than sigreject sigma from this level. + # NOTE: The Median Absolute Deviation (MAD) is calculated as the + # median of the absolute differences between data values and their + # median. For normal distribution MAD is equal to 1.48 times the + # standard deviation but is a more robust estimate of the dispersion + # of data values.The calculation of MAD is straightforward but + # time-consuming, especially if MAD estimates are needed for the + # local environment around every pixel of a large image. The + # calculation is MAD = np.median(np.abs(x-np.median(x))). + # Reference: https://www.interstellarmedium.org/numerical_tools/mad/ + MAD = 1.48 + medL = np.median(L) + sigL = MAD * np.median(np.abs(L - medL)) + medR = np.median(R) + sigR = MAD * np.median(np.abs(R - medR)) + + # nL and nR are the number of good reference pixels in the left and right + # channel in each row. These will be used in lieu of replacing the values + # of those pixels directly. + goodL = 1 * (np.abs(L - medL) <= sigreject * sigL) + nL = np.sum(goodL, axis=1) + goodR = 1 * (np.abs(R - medR) <= sigreject * sigR) + nR = np.sum(goodR, axis=1) + + # Average of the left and right channels, replacing masked pixels with zeros. + # Appropriate normalization factors will be computed later. + L = np.sum(L * goodL, axis=1) / 4 + R = np.sum(R * goodR, axis=1) / 4 + for chan in range(nchan): + kernel_l = kernels_l[chan] + kernel_r = kernels_r[chan] + + # Compute normalizations so that we don't have to directly + # replace the values of flagged/masked reference pixels. + normL = np.convolve(np.ones(nL.shape), kernel_l, mode='same') + normL /= np.convolve(nL / 4, kernel_l, mode='same') + normR = np.convolve(np.ones(nR.shape), kernel_r, mode='same') + normR /= np.convolve(nR / 4, kernel_r, mode='same') + template = np.convolve(L, kernel_l, mode='same') * normL + template += np.convolve(R, kernel_r, mode='same') * normR + data[i, :, chan * npix * 3 // 4:(chan + 1) * npix * 3 // 4] -= template[:, np.newaxis] + + # Add the first read back in. 
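# --- Illustrative aside (not part of the patch): why the normalization above
# avoids in-filling masked reference pixels. The convolved template is rescaled
# by the convolved per-row count of good pixels; the toy arrays below are
# chosen so that every good reference pixel equals 2.0.
import numpy as np

kernel = np.ones(5) / 5.0                    # stand-in smoothing kernel
n_good = np.array([4, 4, 3, 4, 2, 4, 4])     # good ref pixels per row (out of 4)
ref_sum = 2.0 * n_good                       # row sums over the good pixels only

norm = np.convolve(np.ones(n_good.shape), kernel, mode='same')
norm /= np.convolve(n_good / 4.0, kernel, mode='same')
template = np.convolve(ref_sum / 4.0, kernel, mode='same') * norm
print(template[2:5])  # interior rows recover ~2.0 despite the masked pixels
# --- end aside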
+ data += zeroim[np.newaxis, :, :] + datamodel.data[0, :, :, :] = data + + log.info('Optimized convolution kernel applied') + return datamodel + diff --git a/jwst/refpix/reference_pixels.py b/jwst/refpix/reference_pixels.py index 734a1e33bd..9259e81b0c 100644 --- a/jwst/refpix/reference_pixels.py +++ b/jwst/refpix/reference_pixels.py @@ -51,6 +51,7 @@ from ..lib import pipe_utils, reffile_utils from .irs2_subtract_reference import make_irs2_mask +from .optimized_convolution import apply_conv_kernel log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) @@ -134,7 +135,7 @@ SUBARRAY_SKIPPED = 3 -class Dataset(): +class Dataset: """Base Class to handle passing stuff from routine to routine Parameters: @@ -491,6 +492,21 @@ class NIRDataset(Dataset): side_gain: float gain to use in applying the side reference pixel correction + use_conv_kernel : boolean + If True an optimized convolution kernel will be used instead of the running median + + conv_kernel_model : `~jwst.datamodels.ConvKernelModel` + Data model containing the Fourier coefficients from the reference files + + sigreject: integer + Number of sigmas to reject as outliers + + gausssmooth : integer + Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients + + halfwith : integer + Half-width of convolution kernel to build from reference file's coefficients + """ def __init__(self, input_model, @@ -504,7 +520,12 @@ def __init__(self, input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows=False) + odd_even_rows=False, + use_conv_kernel=False, + conv_kernel_model=None, + sigreject=None, + gausssmooth=None, + halfwith=None) # Set appropriate NIR sections self.is_irs2 = pipe_utils.is_irs2(input_model) @@ -1172,25 +1193,32 @@ def do_fullframe_corrections(self): # First transform pixeldq array to detector coordinates self.DMS_to_detector_dq() - for integration in range(self.nints): - for group in range(self.ngroups): - # - # Get the reference values from the top and bottom reference - # pixels - # - self.DMS_to_detector(integration, group) - thisgroup = self.group - refvalues = self.get_refvalues(thisgroup) - self.do_top_bottom_correction(thisgroup, refvalues) - if self.use_side_ref_pixels: - corrected_group = self.do_side_correction(thisgroup) - self.group = corrected_group - else: - self.group = thisgroup - # - # Now transform back from detector to DMS coordinates. - self.detector_to_DMS(integration, group) - log.setLevel(logging.INFO) + if self.use_conv_kernel: + self.input_model = apply_conv_kernel(self.input_model, + self.conv_kernel_model, + sigreject=self.sigreject, + gausssmooth=self.gaussmooth, + halfwith=self.halfwidth) + else: + for integration in range(self.nints): + for group in range(self.ngroups): + # + # Get the reference values from the top and bottom reference + # pixels + # + self.DMS_to_detector(integration, group) + thisgroup = self.group + refvalues = self.get_refvalues(thisgroup) + self.do_top_bottom_correction(thisgroup, refvalues) + if self.use_side_ref_pixels: + corrected_group = self.do_side_correction(thisgroup) + self.group = corrected_group + else: + self.group = thisgroup + # + # Now transform back from detector to DMS coordinates. 
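# --- Illustrative aside (not part of the patch): the per-group sequence run by
# the loop above, condensed into a free function with the Dataset methods
# passed in as callables; every name here is a placeholder.
def correct_one_group(group, use_side_ref_pixels,
                      get_refvalues, do_top_bottom, do_side):
    refvalues = get_refvalues(group)     # means of the top/bottom reference rows
    do_top_bottom(group, refvalues)      # per-amplifier, odd/even column offsets
    if use_side_ref_pixels:
        group = do_side(group)           # running-median 1/f (side) correction
    return group
# --- end aside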
+ self.detector_to_DMS(integration, group) + log.setLevel(logging.INFO) return def do_subarray_corrections(self): @@ -1923,7 +1951,12 @@ def create_dataset(input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows): + odd_even_rows, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith): """Create a dataset object from an input model. Parameters: @@ -1952,6 +1985,20 @@ def create_dataset(input_model, flag that controls whether odd and even-numbered rows are handled separately (MIR only) + use_conv_kernel : bool + If True an optimized convolution kernel will be used instead of the running median + + conv_kernel_model : `~jwst.datamodels.ConvKernelModel` + Data model containing the Fourier coefficients from the reference files + + sigreject: int + Number of sigmas to reject as outliers + + gausssmooth : int + Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients + + halfwith : int + Half-width of convolution kernel to build from reference file's coefficients """ detector = input_model.meta.instrument.detector @@ -1972,104 +2019,189 @@ def create_dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRS2': return NRS2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCA1': return NRCA1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCA2': return NRCA2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCA3': return NRCA3Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCA4': return NRCA4Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCALONG': return NRCALONGDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCB1': return NRCB1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCB2': return NRCB2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCB3': return NRCB3Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCB4': return NRCB4Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, 
+ conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NRCBLONG': return NRCBLONGDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'NIS': return NIRISSDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'GUIDER1': return GUIDER1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) elif detector == 'GUIDER2': return GUIDER2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) else: log.error('Unrecognized detector') return NIRDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain) + side_gain, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) def correct_model(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows): + odd_even_rows, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith): """Wrapper to do Reference Pixel Correction on a JWST Model. Performs the correction on the datamodel @@ -2099,6 +2231,21 @@ def correct_model(input_model, odd_even_columns, flag that controls whether odd and even-numbered rows are handled separately (MIR only) + use_conv_kernel : bool + If True an optimized convolution kernel will be used instead of the running median + + conv_kernel_model : `~jwst.datamodels.ConvKernelModel` + Data model containing the Fourier coefficients from the reference files + + sigreject: int + Number of sigmas to reject as outliers + + gausssmooth : int + Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients + + halfwith : int + Half-width of convolution kernel to build from reference file's coefficients + """ if input_model.meta.instrument.name == 'MIRI': if reffile_utils.is_subarray(input_model): @@ -2110,7 +2257,12 @@ def correct_model(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows) + odd_even_rows, + use_conv_kernel, + conv_kernel_model, + sigreject, + gausssmooth, + halfwith) if input_dataset is None: status = SUBARRAY_DOESNTFIT @@ -2138,7 +2290,6 @@ def reference_pixel_correction(input_dataset): Corrected dataset """ - input_dataset.do_corrections() if input_dataset.input_model.meta.exposure.zero_frame: diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 1e1df78328..0eae62241e 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -25,6 +25,11 @@ class RefPixStep(Step): ovr_corr_mitigation_ftr = float(default=3.0) # Factor to avoid overcorrection of bad reference pixels for IRS2 preserve_irs2_refpix = boolean(default=False) # Preserve reference pixels in output irs2_mean_subtraction = boolean(default=False) # Apply a mean offset subtraction before IRS2 correction + use_conv_kernel = boolean(default=True) # For NIR data only, use the convolution kernel instead of the running median + sigreject = int(default=4) # Number of sigmas to reject as outliers + gausssmooth = int(default=1) # Width of Gaussian 
smoothing kernel to use as a low-pass filter + halfwith = int(default=30) # Half-width of convolution kernel to build + user_supplied_reffile = string(default=None) # ASDF user-supplied reference file """ reference_file_types = ['refpix'] @@ -80,12 +85,38 @@ def process(self, input): else: # Not an NRS IRS2 exposure. Do the normal refpix correction. datamodel = input_model.copy() + + # Get the reference file from CRDS or use user-supplied one + if input_model.meta.instrument.name == 'MIRI': + conv_kernel_model = None + else: + if self.user_supplied_reffile is not None: + conv_kernel_ref_filename = self.get_reference_file(datamodel, 'refpix') + if conv_kernel_ref_filename == 'N/A': + self.log.warning('No reference file found for the optimized convolution kernel.') + self.log.warning('REFPIX step will use a running median') + conv_kernel_model = None + else: + self.log.info('Using CRDS reference file: {}'.format(conv_kernel_ref_filename)) + conv_kernel_model = datamodels.ConvKernelModel(conv_kernel_ref_filename) + elif not self.use_conv_kernel: + conv_kernel_model = None + else: + self.log.info('Using user-supplied reference file: {}'.format(self.user_supplied_reffile)) + conv_kernel_model = datamodels.ConvKernelModel(self.user_supplied_reffile) + status = reference_pixels.correct_model(datamodel, self.odd_even_columns, self.use_side_ref_pixels, self.side_smoothing_length, self.side_gain, - self.odd_even_rows) + self.odd_even_rows, + self.use_conv_kernel, + conv_kernel_model, + self.sigreject, + self.gausssmooth, + self.halfwith + ) if status == reference_pixels.REFPIX_OK: datamodel.meta.cal_step.refpix = 'COMPLETE' From ee89fd27ba7cf963cce1eae3efd2fffd88f4e223 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Wed, 28 Aug 2024 09:41:50 -0400 Subject: [PATCH 02/14] minor change --- jwst/refpix/refpix_step.py | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 0eae62241e..27739acc11 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -26,9 +26,9 @@ class RefPixStep(Step): preserve_irs2_refpix = boolean(default=False) # Preserve reference pixels in output irs2_mean_subtraction = boolean(default=False) # Apply a mean offset subtraction before IRS2 correction use_conv_kernel = boolean(default=True) # For NIR data only, use the convolution kernel instead of the running median - sigreject = int(default=4) # Number of sigmas to reject as outliers - gausssmooth = int(default=1) # Width of Gaussian smoothing kernel to use as a low-pass filter - halfwith = int(default=30) # Half-width of convolution kernel to build + sigreject = integer(default=4) # Number of sigmas to reject as outliers + gausssmooth = integer(default=1) # Width of Gaussian smoothing kernel to use as a low-pass filter + halfwith = integer(default=30) # Half-width of convolution kernel to build user_supplied_reffile = string(default=None) # ASDF user-supplied reference file """ @@ -90,20 +90,21 @@ def process(self, input): if input_model.meta.instrument.name == 'MIRI': conv_kernel_model = None else: - if self.user_supplied_reffile is not None: - conv_kernel_ref_filename = self.get_reference_file(datamodel, 'refpix') - if conv_kernel_ref_filename == 'N/A': - self.log.warning('No reference file found for the optimized convolution kernel.') - self.log.warning('REFPIX step will use a running median') - conv_kernel_model = None - else: - self.log.info('Using CRDS reference file: 
{}'.format(conv_kernel_ref_filename)) - conv_kernel_model = datamodels.ConvKernelModel(conv_kernel_ref_filename) - elif not self.use_conv_kernel: + if not self.use_conv_kernel: conv_kernel_model = None else: - self.log.info('Using user-supplied reference file: {}'.format(self.user_supplied_reffile)) - conv_kernel_model = datamodels.ConvKernelModel(self.user_supplied_reffile) + if self.user_supplied_reffile is None: + conv_kernel_ref_filename = self.get_reference_file(datamodel, 'refpix') + if conv_kernel_ref_filename == 'N/A': + self.log.warning('No reference file found for the optimized convolution kernel.') + self.log.warning('REFPIX step will use a running median') + conv_kernel_model = None + else: + self.log.info('Using CRDS reference file: {}'.format(conv_kernel_ref_filename)) + conv_kernel_model = datamodels.ConvKernelModel(conv_kernel_ref_filename) + else: + self.log.info('Using user-supplied reference file: {}'.format(self.user_supplied_reffile)) + conv_kernel_model = datamodels.ConvKernelModel(self.user_supplied_reffile) status = reference_pixels.correct_model(datamodel, self.odd_even_columns, From f42375bfeaf5e72f7f480e697ee267d2d3f05026 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Wed, 28 Aug 2024 14:51:57 -0400 Subject: [PATCH 03/14] testing --- jwst/refpix/reference_pixels.py | 177 ++++++++++++++++---------------- jwst/refpix/refpix_step.py | 8 +- 2 files changed, 97 insertions(+), 88 deletions(-) diff --git a/jwst/refpix/reference_pixels.py b/jwst/refpix/reference_pixels.py index 9259e81b0c..52fe22310f 100644 --- a/jwst/refpix/reference_pixels.py +++ b/jwst/refpix/reference_pixels.py @@ -513,19 +513,19 @@ def __init__(self, input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, - side_gain): + side_gain, + use_conv_kernel=False, + conv_kernel_model=None, + sigreject=None, + gausssmooth=None, + halfwith=None): super(NIRDataset, self).__init__(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows=False, - use_conv_kernel=False, - conv_kernel_model=None, - sigreject=None, - gausssmooth=None, - halfwith=None) + odd_even_rows=False) # Set appropriate NIR sections self.is_irs2 = pipe_utils.is_irs2(input_model) @@ -1193,6 +1193,9 @@ def do_fullframe_corrections(self): # First transform pixeldq array to detector coordinates self.DMS_to_detector_dq() + print('\n ***** at the do_fullframe_correction function') + print(self.conv_kernel_model, self.sigreject, self.gaussmooth, self.halfwidth) + if self.use_conv_kernel: self.input_model = apply_conv_kernel(self.input_model, self.conv_kernel_model, @@ -2020,165 +2023,165 @@ def create_dataset(input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRS2': return NRS2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCA1': return NRCA1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + 
use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCA2': return NRCA2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCA3': return NRCA3Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCA4': return NRCA4Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCALONG': return NRCALONGDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCB1': return NRCB1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCB2': return NRCB2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCB3': return NRCB3Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCB4': return NRCB4Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NRCBLONG': return NRCBLONGDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'NIS': return NIRISSDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - 
gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'GUIDER1': return GUIDER1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) elif detector == 'GUIDER2': return GUIDER2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) else: log.error('Unrecognized detector') return NIRDataset(input_model, @@ -2186,11 +2189,11 @@ def create_dataset(input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + use_conv_kernel=use_conv_kernel, + conv_kernel_model=conv_kernel_model, + sigreject=sigreject, + gausssmooth=gausssmooth, + halfwith=halfwith) def correct_model(input_model, odd_even_columns, diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 27739acc11..76869598aa 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -89,10 +89,16 @@ def process(self, input): # Get the reference file from CRDS or use user-supplied one if input_model.meta.instrument.name == 'MIRI': conv_kernel_model = None + elif 'FULL' not in input_model.meta.subarray.name: + conv_kernel_model = None + self.log.info('Optimized Convolution Kernel not applied for subarray data') else: if not self.use_conv_kernel: conv_kernel_model = None else: + import asdf + conv_kernel_model = asdf.open(self.user_supplied_reffile) + ''' if self.user_supplied_reffile is None: conv_kernel_ref_filename = self.get_reference_file(datamodel, 'refpix') if conv_kernel_ref_filename == 'N/A': @@ -105,7 +111,7 @@ def process(self, input): else: self.log.info('Using user-supplied reference file: {}'.format(self.user_supplied_reffile)) conv_kernel_model = datamodels.ConvKernelModel(self.user_supplied_reffile) - + ''' status = reference_pixels.correct_model(datamodel, self.odd_even_columns, self.use_side_ref_pixels, From afd5ccdcb07ec1142e78663a9a257b57792a26c4 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Thu, 29 Aug 2024 13:26:22 -0400 Subject: [PATCH 04/14] continuing with implementation --- jwst/refpix/optimized_convolution.py | 61 ++++---- jwst/refpix/reference_pixels.py | 224 ++++++++------------------- jwst/refpix/refpix_step.py | 23 ++- 3 files changed, 104 insertions(+), 204 deletions(-) diff --git a/jwst/refpix/optimized_convolution.py b/jwst/refpix/optimized_convolution.py index c5a54e446a..8ba491fdc9 100644 --- a/jwst/refpix/optimized_convolution.py +++ b/jwst/refpix/optimized_convolution.py @@ -10,7 +10,7 @@ log.setLevel(logging.DEBUG) -def make_kernels(conv_kernel_model, detector, gausssmooth, halfwith): +def make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth): """ Make convolution kernels reference file's Fourier coefficients. 
@@ -23,46 +23,44 @@ def make_kernels(conv_kernel_model, detector, gausssmooth, halfwith): detector : str Name of the detector of the input data - gausssmooth : int + gaussmooth : int Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - halfwith : int + halfwidth : int Half-width of convolution kernel to build from reference file's coefficients Returns: -------- - kernel_left: list - Contains the kernels appropriate for convolution with the left reference pixels - - kernel_right: list - Contains the kernels appropriate for convolution with the right reference pixels + kernels: list + Contains the kernels appropriate for convolution with the left and right reference pixels """ gamma, zeta = get_conv_kernel_coeffs(conv_kernel_model, detector) + gamma=None if gamma is None or zeta is None: - log.info('Optimized convolution kernel coefficients NOT found for detector ', detector) + log.info(f'Optimized convolution kernel coefficients NOT found for detector {detector}') return None kernel_left = [] kernel_right = [] for chan in range(gamma.shape[0]): n = len(gamma[chan]) - 1 - kernel_left = np.fft.fftshift(np.fft.irfft(gamma[chan]))[n - dn:n + dn + 1] - kernel_right = np.fft.fftshift(np.fft.irfft(zeta[chan]))[n - dn:n + dn + 1] + kernel_left = np.fft.fftshift(np.fft.irfft(gamma[chan]))[n - halfwidth:n + halfwidth + 1] + kernel_right = np.fft.fftshift(np.fft.irfft(zeta[chan]))[n - halfwidth:n + halfwidth + 1] - x = np.arange(-dn, dn + 1) - window = np.exp(-x ** 2 / (2 * gausssmooth ** 2)) + x = np.arange(-halfwidth, halfwidth + 1) + window = np.exp(-x ** 2 / (2 * gaussmooth ** 2)) window /= np.sum(window) kernel_right = np.convolve(kernel_right, window, mode='same') kernel_left = np.convolve(kernel_left, window, mode='same') - kernel_right += [kernel_right] - kernel_left += [kernel_left] + kernel_right += kernel_right + kernel_left += kernel_left - return kernel_left, kernel_right + return [kernel_left, kernel_right] def get_conv_kernel_coeffs(conv_kernel_model, detector): @@ -87,18 +85,22 @@ def get_conv_kernel_coeffs(conv_kernel_model, detector): zeta: numpy array Fourier coefficients """ - - conv_kernel_model = conv_kernel_model.to_flat_dict() + mdl_dict = conv_kernel_model.to_flat_dict() gamma, zeta = None, None - for det in conv_kernel_model: - if det == detector: - gamma = conv_kernel_model[det]['gamma'] - zeta = conv_kernel_model[det]['zeta'] + for item in mdl_dict: + det = item.split(sep='.')[0] + if detector.lower() == det: + arr_name = item.split(sep='.')[1] + if arr_name == 'gamma': + gamma = np.array(mdl_dict[item]) + elif arr_name == 'zeta': + zeta = np.array(mdl_dict[item]) + if gamma is not None and zeta is not None: break return gamma, zeta -def apply_conv_kernel(datamodel, conv_kernel_model, sigreject=4, gausssmooth=1, halfwith=30): +def apply_conv_kernel(datamodel, kernels, sigreject=4): """ Apply the convolution kernel. 
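# --- Illustrative aside (not part of the patch): the flat-dict lookup done by
# get_conv_kernel_coeffs above. ConvKernelModel.to_flat_dict() is assumed to
# yield keys of the form '<detector>.gamma' / '<detector>.zeta' (as in the unit
# test data added later in this PR); the dictionary below is a stand-in.
import numpy as np

flat = {
    'nrcb1.gamma': np.ones((4, 5), dtype=complex),
    'nrcb1.zeta': np.zeros((4, 5), dtype=complex),
}

def lookup(flat_dict, detector):
    gamma = zeta = None
    for key, value in flat_dict.items():
        det, _, name = key.partition('.')
        if det.lower() != detector.lower():
            continue
        if name == 'gamma':
            gamma = np.asarray(value)
        elif name == 'zeta':
            zeta = np.asarray(value)
    return gamma, zeta

gamma, zeta = lookup(flat, 'NRCB1')
print(gamma.shape, zeta.shape)  # (4, 5) (4, 5)
# --- end aside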
@@ -108,17 +110,12 @@ def apply_conv_kernel(datamodel, conv_kernel_model, sigreject=4, gausssmooth=1, datamodel : `~jwst.datamodel` Data model containing the data to be corrected - conv_kernel_model : `~jwst.datamodels.ConvKernelModel` - Data model containing the Fourier coefficients from the reference files + kernels : list + List containing the left and right kernels sigreject: int Number of sigmas to reject as outliers - gausssmooth : int - Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - - halfwith : int - Half-width of convolution kernel to build from reference file's coefficients Returns: -------- @@ -130,10 +127,8 @@ def apply_conv_kernel(datamodel, conv_kernel_model, sigreject=4, gausssmooth=1, data = datamodel.data.copy()[0, :, :, :] data = data.astype(float) npix = data.shape[-1] - detector = datamodel.meta.instrument.detector - - kernels_l, kernels_r = make_kernels(conv_kernel_model, detector, gausssmooth, halfwith) + kernels_l, kernels_r = kernels nchan = len(kernels_l) # The subtraction below is needed to flag outliers in the reference pixels. diff --git a/jwst/refpix/reference_pixels.py b/jwst/refpix/reference_pixels.py index 52fe22310f..8e777723f3 100644 --- a/jwst/refpix/reference_pixels.py +++ b/jwst/refpix/reference_pixels.py @@ -51,7 +51,7 @@ from ..lib import pipe_utils, reffile_utils from .irs2_subtract_reference import make_irs2_mask -from .optimized_convolution import apply_conv_kernel +from .optimized_convolution import make_kernels, apply_conv_kernel log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) @@ -168,6 +168,9 @@ class Dataset: flag that controls whether odd and even-numbered rows are handled separately (MIR only) + conv_kernel_params : dict + Dictionary containing the parameters needed for the optimized convolution kernel + """ def __init__(self, input_model, @@ -175,8 +178,15 @@ def __init__(self, input_model, use_side_ref_pixels, side_smoothing_length, side_gain, + conv_kernel_params, odd_even_rows): + self.use_conv_kernel = conv_kernel_params['use_conv_kernel'] + self.conv_kernel_model = conv_kernel_params['conv_kernel_model'] + self.sigreject = conv_kernel_params['sigreject'] + self.gaussmooth = conv_kernel_params['gaussmooth'] + self.halfwidth = conv_kernel_params['halfwidth'] + if (input_model.meta.subarray.xstart is None or input_model.meta.subarray.ystart is None or input_model.meta.subarray.xsize is None or @@ -374,12 +384,17 @@ def log_parameters(self): if not self.is_subarray: log.info('NIR full frame data') log.info('The following parameters are valid for this mode:') - log.info(f'use_side_ref_pixels = {self.use_side_ref_pixels}') - log.info(f'odd_even_columns = {self.odd_even_columns}') - log.info(f'side_smoothing_length = {self.side_smoothing_length}') - log.info(f'side_gain = {self.side_gain}') - log.info('The following parameter is not applicable and is ignored:') - log.info(f'odd_even_rows = {self.odd_even_rows}') + if not self.use_conv_kernel: + log.info(f'use_side_ref_pixels = {self.use_side_ref_pixels}') + log.info(f'odd_even_columns = {self.odd_even_columns}') + log.info(f'side_smoothing_length = {self.side_smoothing_length}') + log.info(f'side_gain = {self.side_gain}') + log.info('The following parameter is not applicable and is ignored:') + log.info(f'odd_even_rows = {self.odd_even_rows}') + else: + log.info(f'sigreject = {self.sigreject}') + log.info(f'gaussmooth = {self.gaussmooth}') + log.info(f'halfwidth = {self.halfwidth}') else: log.info('NIR subarray data') # Transform 
the pixeldq array from DMS to detector coords @@ -492,20 +507,8 @@ class NIRDataset(Dataset): side_gain: float gain to use in applying the side reference pixel correction - use_conv_kernel : boolean - If True an optimized convolution kernel will be used instead of the running median - - conv_kernel_model : `~jwst.datamodels.ConvKernelModel` - Data model containing the Fourier coefficients from the reference files - - sigreject: integer - Number of sigmas to reject as outliers - - gausssmooth : integer - Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - - halfwith : integer - Half-width of convolution kernel to build from reference file's coefficients + conv_kernel_params : dict + Dictionary containing the parameters needed for the optimized convolution kernel """ @@ -514,17 +517,14 @@ def __init__(self, input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=False, - conv_kernel_model=None, - sigreject=None, - gausssmooth=None, - halfwith=None): + conv_kernel_params): super(NIRDataset, self).__init__(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, + conv_kernel_params, odd_even_rows=False) # Set appropriate NIR sections @@ -1189,20 +1189,26 @@ def do_fullframe_corrections(self): """Do Reference Pixels Corrections for all amplifiers, NIR detectors First read of each integration is NOT subtracted, as the signal is removed in the superbias subtraction step""" - # - # First transform pixeldq array to detector coordinates - self.DMS_to_detector_dq() - - print('\n ***** at the do_fullframe_correction function') - print(self.conv_kernel_model, self.sigreject, self.gaussmooth, self.halfwidth) + continue_apply_conv_kernel = False if self.use_conv_kernel: + kernels = make_kernels(self.conv_kernel_model, + self.input_model.meta.instrument.detector, + self.gaussmooth, + self.halfwidth) + if kernels is None: + log.info('The REFPIX step will use the running median') + else: + continue_apply_conv_kernel = False + if continue_apply_conv_kernel: self.input_model = apply_conv_kernel(self.input_model, - self.conv_kernel_model, - sigreject=self.sigreject, - gausssmooth=self.gaussmooth, - halfwith=self.halfwidth) + kernels, + sigreject=self.sigreject) else: + # + # First transform pixeldq array to detector coordinates + self.DMS_to_detector_dq() + for integration in range(self.nints): for group in range(self.ngroups): # @@ -1955,11 +1961,7 @@ def create_dataset(input_model, side_smoothing_length, side_gain, odd_even_rows, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith): + conv_kernel_params): """Create a dataset object from an input model. 
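# --- Illustrative aside (not part of the patch): the parameter bundle that
# RefPixStep builds and that create_dataset / the Dataset classes above unpack.
# The None model is a placeholder for a loaded ConvKernelModel instance.
conv_kernel_params = {
    'use_conv_kernel': True,
    'conv_kernel_model': None,
    'sigreject': 4,
    'gaussmooth': 1,
    'halfwidth': 30,
}
use_conv_kernel = conv_kernel_params['use_conv_kernel']  # mirrors Dataset.__init__
print(use_conv_kernel, conv_kernel_params['halfwidth'])
# --- end aside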
Parameters: @@ -1988,20 +1990,8 @@ def create_dataset(input_model, flag that controls whether odd and even-numbered rows are handled separately (MIR only) - use_conv_kernel : bool - If True an optimized convolution kernel will be used instead of the running median - - conv_kernel_model : `~jwst.datamodels.ConvKernelModel` - Data model containing the Fourier coefficients from the reference files - - sigreject: int - Number of sigmas to reject as outliers - - gausssmooth : int - Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - - halfwith : int - Half-width of convolution kernel to build from reference file's coefficients + conv_kernel_params : dict + Dictionary containing the parameters needed for the optimized convolution kernel """ detector = input_model.meta.instrument.detector @@ -2023,165 +2013,105 @@ def create_dataset(input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRS2': return NRS2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCA1': return NRCA1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCA2': return NRCA2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCA3': return NRCA3Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCA4': return NRCA4Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCALONG': return NRCALONGDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCB1': return NRCB1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCB2': return NRCB2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCB3': return 
NRCB3Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCB4': return NRCB4Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NRCBLONG': return NRCBLONGDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'NIS': return NIRISSDataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'GUIDER1': return GUIDER1Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) elif detector == 'GUIDER2': return GUIDER2Dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) else: log.error('Unrecognized detector') return NIRDataset(input_model, @@ -2189,22 +2119,14 @@ def create_dataset(input_model, use_side_ref_pixels, side_smoothing_length, side_gain, - use_conv_kernel=use_conv_kernel, - conv_kernel_model=conv_kernel_model, - sigreject=sigreject, - gausssmooth=gausssmooth, - halfwith=halfwith) + conv_kernel_params) def correct_model(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, odd_even_rows, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith): + conv_kernel_params): """Wrapper to do Reference Pixel Correction on a JWST Model. 
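# --- Illustrative aside (not part of the patch): what a direct call into the
# refactored wrapper could look like. 'ramp.fits' is a placeholder file and the
# keyword values simply echo typical step defaults, so this is a sketch of the
# call pattern rather than a verified recipe.
from stdatamodels.jwst import datamodels
from jwst.refpix import reference_pixels

conv_kernel_params = {'use_conv_kernel': False, 'conv_kernel_model': None,
                      'sigreject': 4, 'gaussmooth': 1, 'halfwidth': 30}

with datamodels.RampModel('ramp.fits') as model:
    status = reference_pixels.correct_model(model,
                                            odd_even_columns=True,
                                            use_side_ref_pixels=True,
                                            side_smoothing_length=11,
                                            side_gain=1.0,
                                            odd_even_rows=True,
                                            conv_kernel_params=conv_kernel_params)
    print(status == reference_pixels.REFPIX_OK)
# --- end aside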
Performs the correction on the datamodel @@ -2234,20 +2156,8 @@ def correct_model(input_model, odd_even_columns, flag that controls whether odd and even-numbered rows are handled separately (MIR only) - use_conv_kernel : bool - If True an optimized convolution kernel will be used instead of the running median - - conv_kernel_model : `~jwst.datamodels.ConvKernelModel` - Data model containing the Fourier coefficients from the reference files - - sigreject: int - Number of sigmas to reject as outliers - - gausssmooth : int - Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - - halfwith : int - Half-width of convolution kernel to build from reference file's coefficients + conv_kernel_params : dict + Dictionary containing the parameters needed for the optimized convolution kernel """ if input_model.meta.instrument.name == 'MIRI': @@ -2261,11 +2171,7 @@ def correct_model(input_model, odd_even_columns, side_smoothing_length, side_gain, odd_even_rows, - use_conv_kernel, - conv_kernel_model, - sigreject, - gausssmooth, - halfwith) + conv_kernel_params) if input_dataset is None: status = SUBARRAY_DOESNTFIT diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 76869598aa..52c905b8d9 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -27,8 +27,8 @@ class RefPixStep(Step): irs2_mean_subtraction = boolean(default=False) # Apply a mean offset subtraction before IRS2 correction use_conv_kernel = boolean(default=True) # For NIR data only, use the convolution kernel instead of the running median sigreject = integer(default=4) # Number of sigmas to reject as outliers - gausssmooth = integer(default=1) # Width of Gaussian smoothing kernel to use as a low-pass filter - halfwith = integer(default=30) # Half-width of convolution kernel to build + gaussmooth = integer(default=1) # Width of Gaussian smoothing kernel to use as a low-pass filter + halfwidth = integer(default=30) # Half-width of convolution kernel to build user_supplied_reffile = string(default=None) # ASDF user-supplied reference file """ @@ -96,9 +96,6 @@ def process(self, input): if not self.use_conv_kernel: conv_kernel_model = None else: - import asdf - conv_kernel_model = asdf.open(self.user_supplied_reffile) - ''' if self.user_supplied_reffile is None: conv_kernel_ref_filename = self.get_reference_file(datamodel, 'refpix') if conv_kernel_ref_filename == 'N/A': @@ -111,19 +108,21 @@ def process(self, input): else: self.log.info('Using user-supplied reference file: {}'.format(self.user_supplied_reffile)) conv_kernel_model = datamodels.ConvKernelModel(self.user_supplied_reffile) - ''' + + conv_kernel_params = { + 'use_conv_kernel': self.use_conv_kernel, + 'conv_kernel_model': conv_kernel_model, + 'sigreject': self.sigreject, + 'gaussmooth': self.gaussmooth, + 'halfwidth': self.halfwidth + } status = reference_pixels.correct_model(datamodel, self.odd_even_columns, self.use_side_ref_pixels, self.side_smoothing_length, self.side_gain, self.odd_even_rows, - self.use_conv_kernel, - conv_kernel_model, - self.sigreject, - self.gausssmooth, - self.halfwith - ) + conv_kernel_params) if status == reference_pixels.REFPIX_OK: datamodel.meta.cal_step.refpix = 'COMPLETE' From 0499d27eaaf4ee8e7b10ba0a97217b33882c27d8 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Fri, 30 Aug 2024 10:48:04 -0400 Subject: [PATCH 05/14] add message when processing zero frame --- jwst/refpix/reference_pixels.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/jwst/refpix/reference_pixels.py b/jwst/refpix/reference_pixels.py index 8e777723f3..deb0307a5d 100644 --- a/jwst/refpix/reference_pixels.py +++ b/jwst/refpix/reference_pixels.py @@ -1191,7 +1191,7 @@ def do_fullframe_corrections(self): in the superbias subtraction step""" continue_apply_conv_kernel = False - if self.use_conv_kernel: + if self.use_conv_kernel and self.conv_kernel_model is not None: kernels = make_kernels(self.conv_kernel_model, self.input_model.meta.instrument.detector, self.gaussmooth, @@ -2202,6 +2202,7 @@ def reference_pixel_correction(input_dataset): input_dataset.do_corrections() if input_dataset.input_model.meta.exposure.zero_frame: + log.info('Processing the zero frame') process_zeroframe_correction(input_dataset) return From 8875b130c705391dec688deb4391ff19e5fa3da9 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Fri, 30 Aug 2024 10:51:30 -0400 Subject: [PATCH 06/14] adding the PR number --- CHANGES.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 6672bf030a..0792bce015 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -73,6 +73,15 @@ set_telescope_pointing - replace usage of ``copy_arrays=True`` with ``memmap=False`` [#8660] +- Refactored separate modes into submodules instead of inheriting from a base class. + Moved non-JWST-specific code to stcal. [#8613] + +refpix +------ + +- Add optimized convolution kernel instead of the running median for NIR + fullframe data. [#8726] + resample_spec ------------- From c83c338cc9497641a825fb788405ec0f4d0ba0b1 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Thu, 5 Sep 2024 10:59:08 -0400 Subject: [PATCH 07/14] adding unit tests --- jwst/refpix/optimized_convolution.py | 6 +- jwst/refpix/reference_pixels.py | 55 +++++++++------- .../tests/test_oprimized_convolution.py | 65 +++++++++++++++++++ jwst/refpix/tests/test_refpix.py | 54 ++++++++------- 4 files changed, 126 insertions(+), 54 deletions(-) create mode 100644 jwst/refpix/tests/test_oprimized_convolution.py diff --git a/jwst/refpix/optimized_convolution.py b/jwst/refpix/optimized_convolution.py index 8ba491fdc9..d579f6c1aa 100644 --- a/jwst/refpix/optimized_convolution.py +++ b/jwst/refpix/optimized_convolution.py @@ -1,6 +1,6 @@ # # Module for using Reference Pixels to improve the 1/f noise, to be -# used be only for non-IRS2 data +# used be only for non-IRS2 NIR data # import logging @@ -38,7 +38,6 @@ def make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth): """ gamma, zeta = get_conv_kernel_coeffs(conv_kernel_model, detector) - gamma=None if gamma is None or zeta is None: log.info(f'Optimized convolution kernel coefficients NOT found for detector {detector}') return None @@ -89,7 +88,7 @@ def get_conv_kernel_coeffs(conv_kernel_model, detector): gamma, zeta = None, None for item in mdl_dict: det = item.split(sep='.')[0] - if detector.lower() == det: + if detector.lower() == det.lower(): arr_name = item.split(sep='.')[1] if arr_name == 'gamma': gamma = np.array(mdl_dict[item]) @@ -123,7 +122,6 @@ def apply_conv_kernel(datamodel, kernels, sigreject=4): datamodel : `~jwst.datamodel` Data model with convolution """ - data = datamodel.data.copy()[0, :, :, :] data = data.astype(float) npix = data.shape[-1] diff --git a/jwst/refpix/reference_pixels.py b/jwst/refpix/reference_pixels.py index deb0307a5d..68bc20ca37 100644 --- a/jwst/refpix/reference_pixels.py +++ b/jwst/refpix/reference_pixels.py @@ -1191,6 +1191,8 @@ def do_fullframe_corrections(self): in the superbias subtraction step""" 
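# --- Illustrative aside (not part of the patch): driving the new step options
# from Python. The input file name is a placeholder; the keyword names are the
# spec entries added to RefPixStep in this PR (use_conv_kernel, sigreject,
# gaussmooth, halfwidth, user_supplied_reffile).
from jwst.refpix import RefPixStep

result = RefPixStep.call('placeholder_nrcb1_ramp.fits',
                         use_conv_kernel=True,
                         sigreject=4,
                         gaussmooth=1,
                         halfwidth=30)
# --- end aside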
continue_apply_conv_kernel = False + # Check if convolution kernels for this detector are in the reference file + # and if not, proceed with side-pixel correction as usual if self.use_conv_kernel and self.conv_kernel_model is not None: kernels = make_kernels(self.conv_kernel_model, self.input_model.meta.instrument.detector, @@ -1199,35 +1201,38 @@ def do_fullframe_corrections(self): if kernels is None: log.info('The REFPIX step will use the running median') else: - continue_apply_conv_kernel = False + continue_apply_conv_kernel = True + # Make sure the side pixel correction is off before applying convolution + self.use_side_ref_pixels = False + # + # First transform pixeldq array to detector coordinates + self.DMS_to_detector_dq() + + for integration in range(self.nints): + for group in range(self.ngroups): + # + # Get the reference values from the top and bottom reference + # pixels + # + self.DMS_to_detector(integration, group) + thisgroup = self.group + refvalues = self.get_refvalues(thisgroup) + self.do_top_bottom_correction(thisgroup, refvalues) + if self.use_side_ref_pixels: + corrected_group = self.do_side_correction(thisgroup) + self.group = corrected_group + else: + self.group = thisgroup + # + # Now transform back from detector to DMS coordinates. + self.detector_to_DMS(integration, group) + # + # Apply optimized convolution kernel if continue_apply_conv_kernel: self.input_model = apply_conv_kernel(self.input_model, kernels, sigreject=self.sigreject) - else: - # - # First transform pixeldq array to detector coordinates - self.DMS_to_detector_dq() - - for integration in range(self.nints): - for group in range(self.ngroups): - # - # Get the reference values from the top and bottom reference - # pixels - # - self.DMS_to_detector(integration, group) - thisgroup = self.group - refvalues = self.get_refvalues(thisgroup) - self.do_top_bottom_correction(thisgroup, refvalues) - if self.use_side_ref_pixels: - corrected_group = self.do_side_correction(thisgroup) - self.group = corrected_group - else: - self.group = thisgroup - # - # Now transform back from detector to DMS coordinates. 
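# --- Illustrative aside (not part of the patch): a condensed view of the
# branch above -- the optimized kernel replaces only the side-pixel running
# median, and the top/bottom amplifier correction still runs either way.
# make_kernels is the helper from optimized_convolution.py; the call below
# takes the fallback path because no ConvKernelModel is supplied.
from jwst.refpix.optimized_convolution import make_kernels

def choose_side_correction(use_conv_kernel, conv_kernel_model, detector,
                           gaussmooth=1, halfwidth=30):
    kernels = None
    if use_conv_kernel and conv_kernel_model is not None:
        kernels = make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth)
    # When no kernels are available, keep the side-pixel running median.
    return kernels, kernels is None

kernels, use_side_ref_pixels = choose_side_correction(False, None, 'NRCB1')
print(kernels, use_side_ref_pixels)  # None True
# --- end aside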
- self.detector_to_DMS(integration, group) - log.setLevel(logging.INFO) + log.setLevel(logging.INFO) return def do_subarray_corrections(self): diff --git a/jwst/refpix/tests/test_oprimized_convolution.py b/jwst/refpix/tests/test_oprimized_convolution.py new file mode 100644 index 0000000000..dd7e6fb9ee --- /dev/null +++ b/jwst/refpix/tests/test_oprimized_convolution.py @@ -0,0 +1,65 @@ +import numpy as np +from stdatamodels.jwst.datamodels import Level1bModel, ConvKernelModel +from jwst.refpix.optimized_convolution import make_kernels, get_conv_kernel_coeffs, apply_conv_kernel + + +# create the ConvKernelModel +ckm = {'nrcb1': { + 'gamma': np.array([[0.8737859+0.j, 0.72877103-0.01848215j, 0.7474708+0.00441926j, + 0.7596158-0.01682704j, 0.7710808-0.00618939j], + [0.37835783+0.j, 0.27234325-0.03058944j, 0.38302818+0.03056235j, + 0.36819065-0.02578794j, 0.3908449+0.02115744j], + [0.36443716+0.j, 0.335223+0.02436169j, 0.32699308-0.02325623j, + 0.3830375-0.01340938j, 0.39612782+0.00736016j], + [0.00335188+0.j, 0.01759672-0.01073076j, 0.04302938+0.00353758j, + 0.08149841-0.00643084j, 0.07274915-0.002046j]]), + 'zeta': np.array([[0.14007446+0.0000000e+00j, 0.2371146+1.6455967e-02j, + 0.22727127-5.9413449e-03j, 0.2090475+7.0676603e-03j, + 0.20298977+2.2992526e-05j], + [0.6206608+0.j, 0.680701+0.02468053j, 0.57776874-0.03374288j, + 0.5873975+0.01647749j, 0.5693782-0.02531039j], + [0.6543285+0.j, 0.6167225-0.02665404j, 0.6405862+0.01494319j, + 0.57719606+0.00970044j, 0.57160926-0.01088286j], + [1.0137521+0.j, 0.9492664+0.0071805j, 0.92866725-0.00784425j, + 0.8868761-0.00237024j, 0.89918566-0.00323711j]]) + } + } +conv_kernel_model = ConvKernelModel(ckm) + + +def mk_data_mdl(data, instrument, detector): + # create input_model + input_model = Level1bModel(data=data) + input_model.meta.instrument.name = instrument + input_model.meta.instrument.detector = detector + input_model.meta.subarray.name = 'FULL' + return input_model + + +def test_get_conv_kernel_coeffs(): + detector = 'NRCB1' + gamma, zeta = get_conv_kernel_coeffs(conv_kernel_model, detector) + assert gamma is not None + assert zeta is not None + + +def test_mk_kernels(): + detector = 'nothing' + gaussmooth = 1 + halfwidth = 30 + kernels = make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth) + assert kernels is None + + +def test_apply_conv_kernel(): + data = np.zeros((1, 1, 2048, 2048)) + 1.999 + instrument, detector = 'NIRCAM', 'NRCB1' + input_model = mk_data_mdl(data, instrument, detector) + gaussmooth = 1 + halfwidth = 30 + kernels = make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth) + sigreject = 4 + result = apply_conv_kernel(input_model, kernels, sigreject=sigreject) + compare = np.ones((1, 1, 2048, 2048)) + assert compare.all() == result.data.all() + diff --git a/jwst/refpix/tests/test_refpix.py b/jwst/refpix/tests/test_refpix.py index f7d5fccaeb..cf123da24b 100644 --- a/jwst/refpix/tests/test_refpix.py +++ b/jwst/refpix/tests/test_refpix.py @@ -9,7 +9,7 @@ def test_refpix_subarray_miri(): - '''Check that the correction is skipped for MIR subarray data ''' + """Check that the correction is skipped for MIR subarray data """ # For MIRI, no reference pixel correction is performed on subarray data # No changes should be seen in the data arrays before and after correction @@ -73,8 +73,8 @@ def test_refpix_subarray_nirspec(subarray, ysize, xsize): def test_each_amp(): - '''Test that each amp is calculated separately using the average of left - and right pixels''' + """Test that each amp is calculated separately 
using the average of left + and right pixels""" # create input data # create model of data with 0 value array @@ -110,7 +110,7 @@ def test_each_amp(): def test_firstframe_sub(): - '''For MIR data, check that the first group is subtracted from each group in an integration + """For MIR data, check that the first group is subtracted from each group in an integration and added back in after the correction. This was found in testing the amp step. Make sure that the first frame is @@ -118,7 +118,7 @@ def test_firstframe_sub(): in the first group match the reference pixels in all other groups, then the subtraction will result in zeros, leaving zeros to be calculated as the reference pixel values, and the output data will match the input data after the frame is - added back in. So there should be no change to the data.''' + added back in. So there should be no change to the data.""" # create input data # create model of data with 0 value array @@ -150,7 +150,7 @@ def test_firstframe_sub(): np.testing.assert_array_equal(im.data, outim.data) def test_odd_even(): - '''Check that odd/even rows are applied when flag is set''' + """Check that odd/even rows are applied when flag is set""" # Test that odd and even rows are calculated separately @@ -280,7 +280,7 @@ def test_odd_even_amp_nirspec(detector, ysize, odd_even): def test_no_odd_even(): - '''Check that odd/even rows are not applied if flag is set to False''' + """Check that odd/even rows are not applied if flag is set to False""" # Test that odd and even rows are calculated together # create input data @@ -333,8 +333,8 @@ def test_no_odd_even(): def test_side_averaging(): - '''For MIRI data, check that the mean value in the reference pixels is calculated for each amplifier - using the average of the left and right side reference pixels.''' + """For MIRI data, check that the mean value in the reference pixels is calculated for each amplifier + using the average of the left and right side reference pixels.""" # Test that the left and right side pixels are averaged. # create input data @@ -364,8 +364,8 @@ def test_side_averaging(): def test_above_sigma(): - '''Test that a value greater than 3 sigma above mean of reference pixels is rejected - in the averaging of the reference pixels to be subtracted.''' + """Test that a value greater than 3 sigma above mean of reference pixels is rejected + in the averaging of the reference pixels to be subtracted.""" # create input data # create model of data with 0 value array @@ -395,11 +395,11 @@ def test_above_sigma(): def test_nan_refpix(): - '''Verify that the reference pixels flagged DO_NOT_USE are not used in the calculation + """Verify that the reference pixels flagged DO_NOT_USE are not used in the calculation Test that flagging a reference pixel with DO_NOT_USE does not use the pixel in the average. Set the pixel to NaN, which results in a NaN average value if used. 
If the test - passes, then the NaN was correctly flagged and rejected from the average.''' + passes, then the NaN was correctly flagged and rejected from the average.""" # create input data # create model of data with 0 value array @@ -430,7 +430,7 @@ def test_nan_refpix(): def test_do_corrections_subarray_no_oddEven(setup_subarray_cube): - '''Test all corrections for subarray data with no even/odd.''' + """Test all corrections for subarray data with no even/odd.""" # Create inputs and subarray SUB320A335R data, and set correction parameters ngroups = 3 @@ -472,7 +472,7 @@ def test_do_corrections_subarray_no_oddEven(setup_subarray_cube): def test_do_corrections_subarray(setup_subarray_cube): - '''Test all corrections for subarray data.''' + """Test all corrections for subarray data.""" # Create inputs and subarray SUB320A335R data, and set correction parameters ngroups = 3 @@ -514,7 +514,7 @@ def test_do_corrections_subarray(setup_subarray_cube): def test_do_corrections_subarray_4amp(setup_subarray_cube): - '''Test all corrections for subarray data.''' + """Test all corrections for subarray data.""" # Create inputs and subarray SUBGRISM64 data, and set correction parameters ngroups = 3 @@ -583,7 +583,7 @@ def test_do_corrections_subarray_4amp(setup_subarray_cube): def test_get_restore_group_subarray(setup_subarray_cube): - '''Test subarray input model data is replaced with group data.''' + """Test subarray input model data is replaced with group data.""" # Create inputs and subarray SUB320A335R data, and set correction parameters ngroups = 3 @@ -622,7 +622,7 @@ def test_get_restore_group_subarray(setup_subarray_cube): def test_do_top_bottom_correction(setup_cube): - '''Test top/bottom correction for NIRCam data.''' + """Test top/bottom correction for NIRCam data.""" ngroups = 3 nrows = 2048 @@ -695,7 +695,7 @@ def test_do_top_bottom_correction(setup_cube): def test_do_top_bottom_correction_no_even_odd(setup_cube): - '''Test top/bottom correction with no even/odd.''' + """Test top/bottom correction with no even/odd.""" ngroups = 3 nrows = 2048 @@ -750,7 +750,7 @@ def test_do_top_bottom_correction_no_even_odd(setup_cube): def make_rampmodel(ngroups, ysize, xsize, instrument='MIRI', fill_value=None): - '''Make MIRI or NIRSpec ramp model for testing''' + """Make MIRI or NIRSpec ramp model for testing.""" # create the data and groupdq arrays csize = (1, ngroups, ysize, xsize) @@ -796,7 +796,7 @@ def make_rampmodel(ngroups, ysize, xsize, instrument='MIRI', fill_value=None): @pytest.fixture(scope='function') def setup_cube(): - ''' Set up fake data to test.''' + """ Set up fake data to test.""" def _cube(instr, detector, ngroups, nrows, ncols): @@ -822,7 +822,7 @@ def _cube(instr, detector, ngroups, nrows, ncols): @pytest.fixture(scope='function') def setup_subarray_cube(): - ''' Set up fake NIRCam subarray data to test.''' + """ Set up fake NIRCam subarray data to test.""" def _cube(name, detector, xstart, ystart, ngroups, nrows, ncols): @@ -861,7 +861,7 @@ def _cube(name, detector, xstart, ystart, ngroups, nrows, ncols): ('FGS', "GUIDER2") ]) def test_correct_model(setup_cube, instr, det): - '''Test all corrections for full frame data for all detectors.''' + """Test all corrections for full frame data for all detectors.""" ngroups = 2 nrows = 2048 @@ -879,13 +879,15 @@ def test_correct_model(setup_cube, instr, det): input_model = setup_cube(instr, det, ngroups, nrows, ncols) input_model.data[0, 0, :, :] = rpix input_model.data[0, 0, 4:-4, 4:-4] = dataval + conv_kernel_params = None 
correct_model(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows) + odd_even_rows, + conv_kernel_params) np.testing.assert_almost_equal(np.mean(input_model.data[0, 0, :4, 4:-4]), 0, decimal=0) np.testing.assert_almost_equal(np.mean(input_model.data[0, 0, 4:-4, 4:-4]), dataval - rpix, decimal=0) @@ -923,13 +925,15 @@ def test_zero_frame(setup_cube): input_model.zeroframe[0, 4:-4, 4:-4] = dataval / 2. input_model.zeroframe[0, 5, 5] = 0. # Test a bad pixel. input_model.meta.exposure.zero_frame = True + conv_kernel_params = None correct_model(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows) + odd_even_rows, + conv_kernel_params) # Make sure the SCI data is as expected. data = np.zeros(input_model.data.shape, dtype=input_model.data.dtype) From be3c9e3ee4e9da2f28f2d7b46bd34402adeaa287 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Thu, 5 Sep 2024 11:27:31 -0400 Subject: [PATCH 08/14] adding missing argument --- jwst/refpix/tests/test_refpix.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/jwst/refpix/tests/test_refpix.py b/jwst/refpix/tests/test_refpix.py index cf123da24b..77dafa4923 100644 --- a/jwst/refpix/tests/test_refpix.py +++ b/jwst/refpix/tests/test_refpix.py @@ -457,12 +457,14 @@ def test_do_corrections_subarray_no_oddEven(setup_subarray_cube): input_model.pixeldq[:4, :] = dqflags.pixel['REFERENCE_PIXEL'] input_model.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL'] + conv_kernel_params = None init_dataset = create_dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows) + odd_even_rows, + conv_kernel_params) init_dataset.do_corrections() @@ -499,12 +501,14 @@ def test_do_corrections_subarray(setup_subarray_cube): input_model.pixeldq[:4, :] = dqflags.pixel['REFERENCE_PIXEL'] input_model.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL'] + conv_kernel_params = None init_dataset = create_dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows) + odd_even_rows, + conv_kernel_params) init_dataset.do_corrections() @@ -570,12 +574,14 @@ def test_do_corrections_subarray_4amp(setup_subarray_cube): input_model.pixeldq[:, :4] = dqflags.pixel['REFERENCE_PIXEL'] input_model.pixeldq[:, -4:] = dqflags.pixel['REFERENCE_PIXEL'] + conv_kernel_params = None init_dataset = create_dataset(input_model, odd_even_columns, use_side_ref_pixels, side_smoothing_length, side_gain, - odd_even_rows) + odd_even_rows, + conv_kernel_params) init_dataset.do_corrections() From 5d6798a9fdc6e21d46a6283d769d81ff2b897405 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Fri, 6 Sep 2024 13:27:20 -0400 Subject: [PATCH 09/14] adding documentation --- docs/jwst/refpix/arguments.rst | 27 +++++++++++++++++++++++++++ docs/jwst/refpix/description.rst | 2 ++ jwst/refpix/optimized_convolution.py | 8 ++++---- jwst/refpix/refpix_step.py | 10 +++++----- jwst/regtest/test_nircam_image.py | 1 + 5 files changed, 39 insertions(+), 9 deletions(-) diff --git a/docs/jwst/refpix/arguments.rst b/docs/jwst/refpix/arguments.rst index 59126c2db4..0835e2f2c9 100644 --- a/docs/jwst/refpix/arguments.rst +++ b/docs/jwst/refpix/arguments.rst @@ -52,3 +52,30 @@ in IRS2 mode will be processed along with the normal pixels and preserved in the output. This option is intended for calibration or diagnostic reductions only. 
For normal science operation, this argument should always be False, so that interleaved pixels are stripped before continuing processing. + +* ``--use_conv_kernel`` + +If the ``use_conv_kernel`` argument is set to False, all NIR full-frame data +will be processed using the running median and the side-pixel correction. The +default value is True, which turns off the side-pixel correction and +uses an optimized convolution kernel instead of the running median. + +* ``--sigreject`` + +The ``sigreject`` argument is the number of sigmas to reject as outliers in the +optimized convolution kernel algorithm. The value is expected to be a float. + +* ``--gaussmooth`` + +The ``gaussmooth`` argument is the width of Gaussian smoothing kernel to use as +a low-pass filter. The numerical value is expected to be a float. + +* ``--halfwidth`` + +The ``halfwidth`` argument is the half-width of convolution kernel to build. The +numerical value is expected to be a float. + +* ``--user_supplied_reffile`` + +The ``user_supplied_reffile`` argument is the name of the ASDF user-supplied +reference file for the optimized convolution kernel algorithm. diff --git a/docs/jwst/refpix/description.rst b/docs/jwst/refpix/description.rst index f5fb2d8701..5fc8b5be36 100644 --- a/docs/jwst/refpix/description.rst +++ b/docs/jwst/refpix/description.rst @@ -76,6 +76,8 @@ NIR Detector Data (set by the step parameter ``side_gain``, which defaults to 1.0) is subtracted from the full group on a row-by-row basis. Note that the ``odd_even_rows`` parameter is ignored for NIR data when the side reference pixels are processed. + If the ``--use_conv_kernel`` option is set, the side-pixel correction will be + turned off and an optimized convolution kernel will be used instead. #. Transform the data back to the JWST focal plane, or DMS, frame. MIR Detector Data diff --git a/jwst/refpix/optimized_convolution.py b/jwst/refpix/optimized_convolution.py index d579f6c1aa..1d8f089934 100644 --- a/jwst/refpix/optimized_convolution.py +++ b/jwst/refpix/optimized_convolution.py @@ -23,10 +23,10 @@ def make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth): detector : str Name of the detector of the input data - gaussmooth : int + gaussmooth : float Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - halfwidth : int + halfwidth : float Half-width of convolution kernel to build from reference file's coefficients Returns: -------- @@ -99,7 +99,7 @@ def get_conv_kernel_coeffs(conv_kernel_model, detector): return gamma, zeta -def apply_conv_kernel(datamodel, kernels, sigreject=4): +def apply_conv_kernel(datamodel, kernels, sigreject=4.0): """ Apply the convolution kernel.
@@ -112,7 +112,7 @@ def apply_conv_kernel(datamodel, kernels, sigreject=4): kernels : list List containing the left and right kernels - sigreject: int + sigreject: float Number of sigmas to reject as outliers diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 52c905b8d9..6b5a4556c5 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -25,11 +25,11 @@ class RefPixStep(Step): ovr_corr_mitigation_ftr = float(default=3.0) # Factor to avoid overcorrection of bad reference pixels for IRS2 preserve_irs2_refpix = boolean(default=False) # Preserve reference pixels in output irs2_mean_subtraction = boolean(default=False) # Apply a mean offset subtraction before IRS2 correction - use_conv_kernel = boolean(default=True) # For NIR data only, use the convolution kernel instead of the running median - sigreject = integer(default=4) # Number of sigmas to reject as outliers - gaussmooth = integer(default=1) # Width of Gaussian smoothing kernel to use as a low-pass filter - halfwidth = integer(default=30) # Half-width of convolution kernel to build - user_supplied_reffile = string(default=None) # ASDF user-supplied reference file + use_conv_kernel = boolean(default=True) # For NIR full-frame data, use convolution kernel instead of running median + sigreject = float(default=4.0) # Number of sigmas to reject as outliers + gaussmooth = float(default=1.0) # Width of Gaussian smoothing kernel to use as a low-pass filter + halfwidth = float(default=30.0) # Half-width of convolution kernel to build + user_supplied_reffile = string(default=None) # ASDF user-supplied reference file for convolution kernel """ reference_file_types = ['refpix'] diff --git a/jwst/regtest/test_nircam_image.py b/jwst/regtest/test_nircam_image.py index 11283baf01..48c021729d 100644 --- a/jwst/regtest/test_nircam_image.py +++ b/jwst/regtest/test_nircam_image.py @@ -22,6 +22,7 @@ def run_detector1pipeline(rtdata_module): "--steps.saturation.save_results=True", "--steps.superbias.save_results=True", "--steps.refpix.save_results=True", + "--steps.refpix.use_conv_kernel=False", "--steps.linearity.save_results=True", "--steps.dark_current.save_results=True", "--steps.jump.save_results=True", From 89374dc60875a26b1ba26a458bb4fc7738700e91 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Wed, 11 Sep 2024 13:53:25 -0400 Subject: [PATCH 10/14] smoothing integration of code --- docs/jwst/refpix/arguments.rst | 2 +- jwst/refpix/optimized_convolution.py | 125 +++++++++++++-------------- jwst/refpix/reference_pixels.py | 47 +++++----- jwst/refpix/refpix_step.py | 2 +- 4 files changed, 86 insertions(+), 90 deletions(-) diff --git a/docs/jwst/refpix/arguments.rst b/docs/jwst/refpix/arguments.rst index 0835e2f2c9..17d607a536 100644 --- a/docs/jwst/refpix/arguments.rst +++ b/docs/jwst/refpix/arguments.rst @@ -73,7 +73,7 @@ a low-pass filter. The numerical value is expected to be a float. * ``--halfwidth`` The ``halfwidth`` argument is the half-width of convolution kernel to build. The -numerical value is expected to be a float. +numerical value is expected to be an integer. 
* ``--user_supplied_reffile`` diff --git a/jwst/refpix/optimized_convolution.py b/jwst/refpix/optimized_convolution.py index 1d8f089934..f843acdfc9 100644 --- a/jwst/refpix/optimized_convolution.py +++ b/jwst/refpix/optimized_convolution.py @@ -26,7 +26,7 @@ def make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth): gaussmooth : float Width of Gaussian smoothing kernel to use as a low-pass filter on reference file's coefficients - halfwidth : float + halfwidth : int Half-width of convolution kernel to build from reference file's coefficients Returns: @@ -99,30 +99,31 @@ def get_conv_kernel_coeffs(conv_kernel_model, detector): return gamma, zeta -def apply_conv_kernel(datamodel, kernels, sigreject=4.0): +def apply_conv_kernel(data, kernels, zeroim, sigreject=4.0): """ Apply the convolution kernel. Parameters: ----------- - datamodel : `~jwst.datamodel` - Data model containing the data to be corrected + data : 2-D numpy array + Data to be corrected kernels : list List containing the left and right kernels + zeroim : 2-D numpy array + First group of first integration, to find outliers + sigreject: float Number of sigmas to reject as outliers - Returns: -------- - datamodel : `~jwst.datamodel` + data : 2-D numpy array Data model with convolution """ - data = datamodel.data.copy()[0, :, :, :] data = data.astype(float) npix = data.shape[-1] @@ -130,61 +131,57 @@ def apply_conv_kernel(datamodel, kernels, sigreject=4.0): nchan = len(kernels_l) # The subtraction below is needed to flag outliers in the reference pixels. - zeroim = data[0].astype(float) - data -= zeroim[np.newaxis, :, :] - - for i in range(data.shape[0]): - L = data[i, :, :4] - R = data[i, :, -4:] - - # Find the approximate standard deviations of the reference pixels - # using an outlier-robust median approach. Mask pixels that differ - # by more than sigreject sigma from this level. - # NOTE: The Median Absolute Deviation (MAD) is calculated as the - # median of the absolute differences between data values and their - # median. For normal distribution MAD is equal to 1.48 times the - # standard deviation but is a more robust estimate of the dispersion - # of data values.The calculation of MAD is straightforward but - # time-consuming, especially if MAD estimates are needed for the - # local environment around every pixel of a large image. The - # calculation is MAD = np.median(np.abs(x-np.median(x))). - # Reference: https://www.interstellarmedium.org/numerical_tools/mad/ - MAD = 1.48 - medL = np.median(L) - sigL = MAD * np.median(np.abs(L - medL)) - medR = np.median(R) - sigR = MAD * np.median(np.abs(R - medR)) - - # nL and nR are the number of good reference pixels in the left and right - # channel in each row. These will be used in lieu of replacing the values - # of those pixels directly. - goodL = 1 * (np.abs(L - medL) <= sigreject * sigL) - nL = np.sum(goodL, axis=1) - goodR = 1 * (np.abs(R - medR) <= sigreject * sigR) - nR = np.sum(goodR, axis=1) - - # Average of the left and right channels, replacing masked pixels with zeros. - # Appropriate normalization factors will be computed later. - L = np.sum(L * goodL, axis=1) / 4 - R = np.sum(R * goodR, axis=1) / 4 - for chan in range(nchan): - kernel_l = kernels_l[chan] - kernel_r = kernels_r[chan] - - # Compute normalizations so that we don't have to directly - # replace the values of flagged/masked reference pixels. 
- normL = np.convolve(np.ones(nL.shape), kernel_l, mode='same') - normL /= np.convolve(nL / 4, kernel_l, mode='same') - normR = np.convolve(np.ones(nR.shape), kernel_r, mode='same') - normR /= np.convolve(nR / 4, kernel_r, mode='same') - template = np.convolve(L, kernel_l, mode='same') * normL - template += np.convolve(R, kernel_r, mode='same') * normR - data[i, :, chan * npix * 3 // 4:(chan + 1) * npix * 3 // 4] -= template[:, np.newaxis] - - # Add the first read back in. - data += zeroim[np.newaxis, :, :] - datamodel.data[0, :, :, :] = data - - log.info('Optimized convolution kernel applied') - return datamodel + zeroim = zeroim.astype(float) + data -= zeroim + + L = data[:, :4] + R = data[:, -4:] + + # Find the approximate standard deviations of the reference pixels + # using an outlier-robust median approach. Mask pixels that differ + # by more than sigreject sigma from this level. + # NOTE: The Median Absolute Deviation (MAD) is calculated as the + # median of the absolute differences between data values and their + # median. For normal distribution MAD is equal to 1.48 times the + # standard deviation but is a more robust estimate of the dispersion + # of data values.The calculation of MAD is straightforward but + # time-consuming, especially if MAD estimates are needed for the + # local environment around every pixel of a large image. The + # calculation is MAD = np.median(np.abs(x-np.median(x))). + # Reference: https://www.interstellarmedium.org/numerical_tools/mad/ + MAD = 1.48 + medL = np.median(L) + sigL = MAD * np.median(np.abs(L - medL)) + medR = np.median(R) + sigR = MAD * np.median(np.abs(R - medR)) + + # nL and nR are the number of good reference pixels in the left and right + # channel in each row. These will be used in lieu of replacing the values + # of those pixels directly. + goodL = 1 * (np.abs(L - medL) <= sigreject * sigL) + nL = np.sum(goodL, axis=1) + goodR = 1 * (np.abs(R - medR) <= sigreject * sigR) + nR = np.sum(goodR, axis=1) + + # Average of the left and right channels, replacing masked pixels with zeros. + # Appropriate normalization factors will be computed later. + L = np.sum(L * goodL, axis=1) / 4 + R = np.sum(R * goodR, axis=1) / 4 + for chan in range(nchan): + kernel_l = kernels_l[chan] + kernel_r = kernels_r[chan] + + # Compute normalizations so that we don't have to directly + # replace the values of flagged/masked reference pixels. 
+ normL = np.convolve(np.ones(nL.shape), kernel_l, mode='same') + normL /= np.convolve(nL / 4, kernel_l, mode='same') + normR = np.convolve(np.ones(nR.shape), kernel_r, mode='same') + normR /= np.convolve(nR / 4, kernel_r, mode='same') + template = np.convolve(L, kernel_l, mode='same') * normL + template += np.convolve(R, kernel_r, mode='same') * normR + data[:, chan * npix * 3 // 4:(chan + 1) * npix * 3 // 4] -= template[:, np.newaxis] + + data += zeroim + log.debug('Optimized convolution kernel applied') + return data diff --git a/jwst/refpix/reference_pixels.py b/jwst/refpix/reference_pixels.py index 68bc20ca37..eb2db08fc9 100644 --- a/jwst/refpix/reference_pixels.py +++ b/jwst/refpix/reference_pixels.py @@ -1170,10 +1170,29 @@ def do_side_correction(self, group): """ - left = self.calculate_side_ref_signal(group, 0, 3) - right = self.calculate_side_ref_signal(group, 2044, 2047) - sidegroup = self.combine_ref_signals(left, right) - corrected_group = self.apply_side_correction(group, sidegroup) + continue_apply_conv_kernel = False + # Check if convolution kernels for this detector are in the reference file + # and if not, proceed with side-pixel correction as usual + if self.use_conv_kernel and self.conv_kernel_model is not None: + kernels = make_kernels(self.conv_kernel_model, + self.input_model.meta.instrument.detector, + self.gaussmooth, + self.halfwidth) + if kernels is None: + log.info('The REFPIX step will use the running median') + else: + continue_apply_conv_kernel = True + # + # Apply optimized convolution kernel + if continue_apply_conv_kernel: + corrected_group = apply_conv_kernel(group, kernels, self.input_model.data[0, 0, ...], + sigreject=self.sigreject) + else: + # use running median + left = self.calculate_side_ref_signal(group, 0, 3) + right = self.calculate_side_ref_signal(group, 2044, 2047) + sidegroup = self.combine_ref_signals(left, right) + corrected_group = self.apply_side_correction(group, sidegroup) return corrected_group def do_corrections(self): @@ -1190,20 +1209,6 @@ def do_fullframe_corrections(self): First read of each integration is NOT subtracted, as the signal is removed in the superbias subtraction step""" - continue_apply_conv_kernel = False - # Check if convolution kernels for this detector are in the reference file - # and if not, proceed with side-pixel correction as usual - if self.use_conv_kernel and self.conv_kernel_model is not None: - kernels = make_kernels(self.conv_kernel_model, - self.input_model.meta.instrument.detector, - self.gaussmooth, - self.halfwidth) - if kernels is None: - log.info('The REFPIX step will use the running median') - else: - continue_apply_conv_kernel = True - # Make sure the side pixel correction is off before applying convolution - self.use_side_ref_pixels = False # # First transform pixeldq array to detector coordinates self.DMS_to_detector_dq() @@ -1226,12 +1231,6 @@ def do_fullframe_corrections(self): # # Now transform back from detector to DMS coordinates. 
self.detector_to_DMS(integration, group) - # - # Apply optimized convolution kernel - if continue_apply_conv_kernel: - self.input_model = apply_conv_kernel(self.input_model, - kernels, - sigreject=self.sigreject) log.setLevel(logging.INFO) return diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 6b5a4556c5..269a1aa1ae 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -28,7 +28,7 @@ class RefPixStep(Step): use_conv_kernel = boolean(default=True) # For NIR full-frame data, use convolution kernel instead of running median sigreject = float(default=4.0) # Number of sigmas to reject as outliers gaussmooth = float(default=1.0) # Width of Gaussian smoothing kernel to use as a low-pass filter - halfwidth = float(default=30.0) # Half-width of convolution kernel to build + halfwidth = integer(default=30) # Half-width of convolution kernel to build user_supplied_reffile = string(default=None) # ASDF user-supplied reference file for convolution kernel """ From 3c005777f5769741c0f3c674c315b72a24e120cb Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Wed, 11 Sep 2024 16:07:36 -0400 Subject: [PATCH 11/14] fixing test to match changes --- jwst/refpix/tests/test_oprimized_convolution.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/jwst/refpix/tests/test_oprimized_convolution.py b/jwst/refpix/tests/test_oprimized_convolution.py index dd7e6fb9ee..36fe11d4f6 100644 --- a/jwst/refpix/tests/test_oprimized_convolution.py +++ b/jwst/refpix/tests/test_oprimized_convolution.py @@ -52,14 +52,14 @@ def test_mk_kernels(): def test_apply_conv_kernel(): - data = np.zeros((1, 1, 2048, 2048)) + 1.999 + data = np.zeros((3, 3, 2048, 2048)) + 1.999 instrument, detector = 'NIRCAM', 'NRCB1' input_model = mk_data_mdl(data, instrument, detector) gaussmooth = 1 halfwidth = 30 kernels = make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth) sigreject = 4 - result = apply_conv_kernel(input_model, kernels, sigreject=sigreject) + result = apply_conv_kernel(input_model.data[1, 1, ...], kernels, data[0, 0, ...], sigreject=sigreject) compare = np.ones((1, 1, 2048, 2048)) - assert compare.all() == result.data.all() + assert compare.all() == result.all() From a4eb93f099c7737f2c64c3ab851a23d310f57e70 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Mon, 23 Sep 2024 15:27:27 -0400 Subject: [PATCH 12/14] adding PR changes --- changes/8726.refpix.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 changes/8726.refpix.rst diff --git a/changes/8726.refpix.rst b/changes/8726.refpix.rst new file mode 100644 index 0000000000..5e5f76eed1 --- /dev/null +++ b/changes/8726.refpix.rst @@ -0,0 +1 @@ +Add optimized convolution kernel instead of the running median for NIR fullframe data. 
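Two ideas in the hunks above are easy to lose in the diff noise: outliers among the four left and four right reference columns are masked using a MAD-based sigma estimate, and the convolution with the channel kernel is renormalized so that masked pixels do not bias the per-row correction template. The following standalone sketch illustrates both pieces with synthetic data; mad_mask and normalized_reference_template are illustrative names, not pipeline functions, and this is a sketch of the idea rather than the step's implementation.

import numpy as np

def mad_mask(refpix, sigreject=4.0):
    """Return a 0/1 mask of good reference pixels using a MAD-based sigma."""
    med = np.median(refpix)
    # For Gaussian data, sigma ~ 1.48 * MAD (median absolute deviation).
    sigma = 1.48 * np.median(np.abs(refpix - med))
    return (np.abs(refpix - med) <= sigreject * sigma).astype(float)

def normalized_reference_template(refpix, kernel, sigreject=4.0):
    """Convolve the per-row mean of 4 reference columns with a kernel,
    renormalizing so masked (outlier) pixels do not bias the estimate.
    Assumes every row keeps at least one good reference pixel."""
    good = mad_mask(refpix, sigreject)            # shape (nrows, 4)
    ngood = good.sum(axis=1)                      # good pixels per row
    rowmean = (refpix * good).sum(axis=1) / 4.0   # masked pixels count as zero
    # Normalization: ratio of the kernel response for "all pixels good"
    # to the response for the actual fraction of good pixels per row.
    norm = np.convolve(np.ones_like(ngood, dtype=float), kernel, mode='same')
    norm /= np.convolve(ngood / 4.0, kernel, mode='same')
    return np.convolve(rowmean, kernel, mode='same') * norm

# Example with synthetic data: 2048 rows, 4 reference columns, one outlier.
rng = np.random.default_rng(0)
refpix = rng.normal(0.0, 1.0, size=(2048, 4))
refpix[100, 2] = 50.0                             # outlier to be rejected
kernel = np.exp(-0.5 * (np.arange(-30, 31) / 5.0) ** 2)
kernel /= kernel.sum()
template = normalized_reference_template(refpix, kernel)
print(template.shape)                             # (2048,)
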
From 1c4d97bc134c7e1d9e8833393e47b2572356ac95 Mon Sep 17 00:00:00 2001 From: Maria Pena-Guerrero Date: Tue, 22 Oct 2024 10:46:52 -0400 Subject: [PATCH 13/14] setting default for use_conv_kernel to False --- jwst/refpix/refpix_step.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jwst/refpix/refpix_step.py b/jwst/refpix/refpix_step.py index 577f83d865..6cbbbd3a10 100644 --- a/jwst/refpix/refpix_step.py +++ b/jwst/refpix/refpix_step.py @@ -25,7 +25,7 @@ class RefPixStep(Step): ovr_corr_mitigation_ftr = float(default=3.0) # Factor to avoid overcorrection of bad reference pixels for IRS2 preserve_irs2_refpix = boolean(default=False) # Preserve reference pixels in output irs2_mean_subtraction = boolean(default=False) # Apply a mean offset subtraction before IRS2 correction - use_conv_kernel = boolean(default=True) # For NIR full-frame data, use convolution kernel instead of running median + use_conv_kernel = boolean(default=False) # For NIR full-frame data, use convolution kernel instead of running median sigreject = float(default=4.0) # Number of sigmas to reject as outliers gaussmooth = float(default=1.0) # Width of Gaussian smoothing kernel to use as a low-pass filter halfwidth = integer(default=30) # Half-width of convolution kernel to build From 1af88321f383e49cc3b88d7a4e94013b6d6219b1 Mon Sep 17 00:00:00 2001 From: David Law Date: Tue, 19 Nov 2024 16:43:44 -0500 Subject: [PATCH 14/14] Bugfix to optimized convolution --- jwst/refpix/optimized_convolution.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/jwst/refpix/optimized_convolution.py b/jwst/refpix/optimized_convolution.py index f843acdfc9..a2a79e3968 100644 --- a/jwst/refpix/optimized_convolution.py +++ b/jwst/refpix/optimized_convolution.py @@ -42,8 +42,8 @@ def make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth): log.info(f'Optimized convolution kernel coefficients NOT found for detector {detector}') return None - kernel_left = [] - kernel_right = [] + kernels_left = [] + kernels_right = [] for chan in range(gamma.shape[0]): n = len(gamma[chan]) - 1 kernel_left = np.fft.fftshift(np.fft.irfft(gamma[chan]))[n - halfwidth:n + halfwidth + 1] @@ -56,10 +56,10 @@ def make_kernels(conv_kernel_model, detector, gaussmooth, halfwidth): kernel_right = np.convolve(kernel_right, window, mode='same') kernel_left = np.convolve(kernel_left, window, mode='same') - kernel_right += kernel_right - kernel_left += kernel_left + kernels_right += [kernel_right] + kernels_left += [kernel_left] - return [kernel_left, kernel_right] + return [kernels_left, kernels_right] def get_conv_kernel_coeffs(conv_kernel_model, detector): @@ -130,10 +130,6 @@ def apply_conv_kernel(data, kernels, zeroim, sigreject=4.0): kernels_l, kernels_r = kernels nchan = len(kernels_l) - # The subtraction below is needed to flag outliers in the reference pixels. - zeroim = zeroim.astype(float) - data -= zeroim - L = data[:, :4] R = data[:, -4:] @@ -181,7 +177,6 @@ def apply_conv_kernel(data, kernels, zeroim, sigreject=4.0): template += np.convolve(R, kernel_r, mode='same') * normR data[:, chan * npix * 3 // 4:(chan + 1) * npix * 3 // 4] -= template[:, np.newaxis] - data += zeroim log.debug('Optimized convolution kernel applied') return data
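
For reference, a minimal standalone sketch of the kernel construction that the bugfix above targets: for each output channel, the complex Fourier coefficients (gamma or zeta) from the reference file are inverse-transformed with np.fft.irfft, centered with np.fft.fftshift, truncated to plus/minus halfwidth samples, and low-pass filtered with a normalized Gaussian window before being appended to the per-channel kernel lists. The helper name kernel_from_coeffs and the small coefficient array and halfwidth below are illustrative only (chosen to resemble the test file's ConvKernelModel); this is not the module's code.

import numpy as np

def kernel_from_coeffs(coeffs, halfwidth=30, gaussmooth=1.0):
    """Turn complex Fourier coefficients into a small, smoothed spatial kernel."""
    n = len(coeffs) - 1
    # Inverse real FFT gives the spatial response; fftshift centers it.
    spatial = np.fft.fftshift(np.fft.irfft(coeffs))
    # Keep only +/- halfwidth samples around the center.
    kernel = spatial[n - halfwidth:n + halfwidth + 1]
    # Low-pass filter with a normalized Gaussian window.
    x = np.arange(-halfwidth, halfwidth + 1)
    window = np.exp(-x ** 2 / (2 * gaussmooth ** 2))
    window /= window.sum()
    return np.convolve(kernel, window, mode='same')

# Example: 5 coefficients for one channel, so a small halfwidth is used here;
# the real reference files carry many more coefficients per channel.
gamma_chan = np.array([0.87 + 0j, 0.73 - 0.02j, 0.75 + 0.004j,
                       0.76 - 0.017j, 0.77 - 0.006j])
k = kernel_from_coeffs(gamma_chan, halfwidth=3)
print(k.shape, np.round(k.sum(), 3))              # (7,) and the kernel sum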